From a3c760271943e075b2cd60c153e9f728361ce6b5 Mon Sep 17 00:00:00 2001 From: Christopher Fitzner Date: Mon, 12 Aug 2024 15:18:20 -0700 Subject: [PATCH] Update everything else --- go.mod | 20 +- go.sum | 44 +- vendor/github.com/BurntSushi/toml/.gitignore | 2 + vendor/github.com/BurntSushi/toml/COPYING | 21 + vendor/github.com/BurntSushi/toml/README.md | 120 ++ vendor/github.com/BurntSushi/toml/decode.go | 602 +++++++ .../BurntSushi/toml/decode_go116.go | 19 + .../github.com/BurntSushi/toml/deprecated.go | 21 + vendor/github.com/BurntSushi/toml/doc.go | 11 + vendor/github.com/BurntSushi/toml/encode.go | 750 +++++++++ vendor/github.com/BurntSushi/toml/error.go | 279 ++++ .../github.com/BurntSushi/toml/internal/tz.go | 36 + vendor/github.com/BurntSushi/toml/lex.go | 1233 ++++++++++++++ vendor/github.com/BurntSushi/toml/meta.go | 121 ++ vendor/github.com/BurntSushi/toml/parse.go | 781 +++++++++ .../github.com/BurntSushi/toml/type_fields.go | 242 +++ .../github.com/BurntSushi/toml/type_toml.go | 70 + .../bmatcuk/doublestar/v4/.codecov.yml | 10 + .../bmatcuk/doublestar/v4/.gitignore | 32 + .../github.com/bmatcuk/doublestar/v4/LICENSE | 22 + .../bmatcuk/doublestar/v4/README.md | 402 +++++ .../bmatcuk/doublestar/v4/UPGRADING.md | 63 + .../bmatcuk/doublestar/v4/doublestar.go | 13 + .../github.com/bmatcuk/doublestar/v4/glob.go | 473 ++++++ .../bmatcuk/doublestar/v4/globoptions.go | 144 ++ .../bmatcuk/doublestar/v4/globwalk.go | 414 +++++ .../github.com/bmatcuk/doublestar/v4/match.go | 381 +++++ .../github.com/bmatcuk/doublestar/v4/utils.go | 147 ++ .../bmatcuk/doublestar/v4/validate.go | 82 + .../hashicorp/go-version/CHANGELOG.md | 19 + .../github.com/hashicorp/go-version/LICENSE | 2 + .../github.com/hashicorp/go-version/README.md | 2 +- .../hashicorp/go-version/constraint.go | 6 +- .../hashicorp/go-version/version.go | 46 +- .../go-version/version_collection.go | 3 + .../hashicorp/hc-install/.go-version | 2 +- .../github.com/hashicorp/hc-install/README.md | 88 +- .../hc-install/checkpoint/latest_version.go | 18 +- .../github.com/hashicorp/hc-install/fs/fs.go | 4 +- .../hashicorp/hc-install/fs/fs_unix.go | 4 +- .../hashicorp/hc-install/installer.go | 4 +- .../hc-install/internal/build/go_build.go | 8 +- .../internal/httpclient/httpclient.go | 15 +- .../releasesjson/checksum_downloader.go | 4 +- .../internal/releasesjson/downloader.go | 104 +- .../internal/releasesjson/product_version.go | 3 +- .../internal/releasesjson/releases.go | 8 +- .../hashicorp/hc-install/product/consul.go | 4 - .../hc-install/releases/enterprise.go | 7 +- .../hc-install/releases/exact_version.go | 33 +- .../hc-install/releases/latest_version.go | 33 +- .../hashicorp/hc-install/releases/releases.go | 4 +- .../hashicorp/hc-install/releases/versions.go | 11 +- .../hashicorp/hc-install/version/VERSION | 2 +- .../github.com/hashicorp/hcl/v2/CHANGELOG.md | 11 + .../hashicorp/hcl/v2/hclsyntax/expression.go | 15 +- .../hcl/v2/hclsyntax/parser_traversal.go | 51 +- .../hashicorp/hcl/v2/hclsyntax/public.go | 31 + .../internal/check/directory.go | 174 ++ .../internal/check/file.go | 39 + .../internal/check/file_extension.go | 64 + .../internal/check/file_mismatch.go | 284 ++++ .../internal/check/frontmatter.go | 104 ++ .../internal/check/provider_file.go | 67 + .../internal/cmd/generate.go | 2 +- .../internal/cmd/migrate.go | 4 + .../internal/cmd/validate.go | 21 +- .../{ => internal}/functionmd/render.go | 2 +- .../internal/mdplain/mdplain.go | 26 +- .../internal/mdplain/renderer.go | 250 +-- .../internal/provider/generate.go | 5 +- 
.../internal/provider/logger.go | 27 + .../internal/provider/migrate.go | 93 +- .../internal/provider/schema.go | 136 ++ .../internal/provider/template.go | 5 +- .../internal/provider/util.go | 11 +- .../internal/provider/validate.go | 498 +++--- .../{ => internal}/schemamd/behaviors.go | 0 .../{ => internal}/schemamd/render.go | 68 +- .../schemamd/write_attribute_description.go | 0 .../schemamd/write_block_type_description.go | 0 ...write_nested_attribute_type_description.go | 0 .../{ => internal}/schemamd/write_type.go | 0 .../terraform-plugin-testing/compare/doc.go | 5 + .../compare/value_comparer.go | 13 + .../compare/values_differ.go | 31 + .../compare/values_same.go | 31 + .../helper/resource/additional_cli_options.go | 26 + .../helper/resource/state_shim.go | 30 +- .../helper/resource/testing.go | 4 + .../helper/resource/testing_new_config.go | 23 +- .../internal/plugintest/working_dir.go | 3 +- .../knownvalue/float32.go | 51 + .../knownvalue/int32.go | 51 + .../knownvalue/tuple.go | 67 + .../knownvalue/tuple_partial.go | 89 + .../knownvalue/tuple_size.go | 55 + .../plancheck/deferred_reason.go | 21 + .../plancheck/expect_deferred_change.go | 49 + .../plancheck/expect_no_deferred_changes.go | 52 + .../statecheck/compare_value.go | 114 ++ .../statecheck/compare_value_collection.go | 223 +++ .../statecheck/compare_value_pairs.go | 111 ++ .../tfversion/require_above.go | 27 +- .../tfversion/require_below.go | 25 +- .../tfversion/require_between.go | 37 +- .../tfversion/require_not.go | 21 +- .../tfversion/skip_above.go | 27 +- .../tfversion/skip_below.go | 27 +- .../tfversion/skip_between.go | 35 +- .../tfversion/skip_if.go | 21 +- .../tfversion/skip_if_not_prerelease.go | 28 + .../tfversion/versions.go | 1 + .../russross/blackfriday/.gitignore | 8 - .../russross/blackfriday/.travis.yml | 18 - .../russross/blackfriday/LICENSE.txt | 28 - .../github.com/russross/blackfriday/README.md | 364 ---- .../github.com/russross/blackfriday/block.go | 1480 ----------------- vendor/github.com/russross/blackfriday/doc.go | 32 - .../github.com/russross/blackfriday/html.go | 945 ----------- .../github.com/russross/blackfriday/inline.go | 1154 ------------- .../github.com/russross/blackfriday/latex.go | 334 ---- .../russross/blackfriday/markdown.go | 943 ----------- .../russross/blackfriday/smartypants.go | 430 ----- vendor/github.com/yuin/goldmark/README.md | 39 +- .../yuin/goldmark/extension/footnote.go | 24 +- .../yuin/goldmark/extension/linkify.go | 8 +- .../yuin/goldmark/extension/typographer.go | 4 +- .../yuin/goldmark/parser/html_block.go | 2 +- .../yuin/goldmark/parser/raw_html.go | 39 +- .../yuin/goldmark/renderer/html/html.go | 9 +- vendor/github.com/yuin/goldmark/util/util.go | 2 +- .../go-cty/cty/function/stdlib/collection.go | 13 +- vendor/github.com/zclconf/go-cty/cty/walk.go | 2 +- .../goldmark/frontmatter/.changie.yaml | 26 + .../goldmark/frontmatter/.gitignore | 4 + .../goldmark/frontmatter/.golangci.yml | 26 + .../goldmark/frontmatter/CHANGELOG.md | 13 + .../go.abhg.dev/goldmark/frontmatter/LICENSE | 28 + .../go.abhg.dev/goldmark/frontmatter/Makefile | 63 + .../goldmark/frontmatter/README.md | 197 +++ .../go.abhg.dev/goldmark/frontmatter/data.go | 45 + .../go.abhg.dev/goldmark/frontmatter/doc.go | 6 + .../goldmark/frontmatter/extend.go | 70 + .../goldmark/frontmatter/format.go | 58 + .../goldmark/frontmatter/mode_string.go | 24 + .../go.abhg.dev/goldmark/frontmatter/parse.go | 192 +++ .../goldmark/frontmatter/transform.go | 35 + vendor/golang.org/x/mod/modfile/rule.go | 106 +- 
vendor/golang.org/x/mod/modfile/work.go | 52 +- vendor/modules.txt | 39 +- 151 files changed, 10614 insertions(+), 6523 deletions(-) create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/error.go create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/meta.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 vendor/github.com/BurntSushi/toml/type_toml.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/.gitignore create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/LICENSE create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/README.md create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/doublestar.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/glob.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/globoptions.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/globwalk.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/match.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/utils.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/validate.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/directory.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_extension.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_mismatch.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/frontmatter.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/provider_file.go rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/functionmd/render.go (97%) create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/schema.go rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/schemamd/behaviors.go (100%) rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/schemamd/render.go (90%) rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/schemamd/write_attribute_description.go (100%) rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/schemamd/write_block_type_description.go (100%) rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/schemamd/write_nested_attribute_type_description.go (100%) rename vendor/github.com/hashicorp/terraform-plugin-docs/{ => internal}/schemamd/write_type.go (100%) create mode 100644 
vendor/github.com/hashicorp/terraform-plugin-testing/compare/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/compare/value_comparer.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_differ.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_same.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/additional_cli_options.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/float32.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/int32.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_partial.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_size.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/deferred_reason.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_deferred_change.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_no_deferred_changes.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_collection.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_pairs.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if_not_prerelease.go delete mode 100644 vendor/github.com/russross/blackfriday/.gitignore delete mode 100644 vendor/github.com/russross/blackfriday/.travis.yml delete mode 100644 vendor/github.com/russross/blackfriday/LICENSE.txt delete mode 100644 vendor/github.com/russross/blackfriday/README.md delete mode 100644 vendor/github.com/russross/blackfriday/block.go delete mode 100644 vendor/github.com/russross/blackfriday/doc.go delete mode 100644 vendor/github.com/russross/blackfriday/html.go delete mode 100644 vendor/github.com/russross/blackfriday/inline.go delete mode 100644 vendor/github.com/russross/blackfriday/latex.go delete mode 100644 vendor/github.com/russross/blackfriday/markdown.go delete mode 100644 vendor/github.com/russross/blackfriday/smartypants.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/.changie.yaml create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/.gitignore create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/.golangci.yml create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/CHANGELOG.md create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/LICENSE create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/Makefile create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/README.md create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/data.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/doc.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/extend.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/format.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/mode_string.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/parse.go create mode 100644 vendor/go.abhg.dev/goldmark/frontmatter/transform.go diff --git a/go.mod b/go.mod index cf0c8ba7..c15a37ab 100644 --- a/go.mod +++ b/go.mod @@ -9,17 +9,18 @@ require ( 
github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 - github.com/hashicorp/terraform-plugin-docs v0.18.0 + github.com/hashicorp/terraform-plugin-docs v0.19.4 github.com/hashicorp/terraform-plugin-framework v1.11.0 github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 github.com/hashicorp/terraform-plugin-go v0.23.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 - github.com/hashicorp/terraform-plugin-testing v1.7.0 + github.com/hashicorp/terraform-plugin-testing v1.10.0 github.com/stretchr/testify v1.9.0 ) require ( + github.com/BurntSushi/toml v1.2.1 // indirect github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect @@ -29,6 +30,7 @@ require ( github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.17.0 // indirect @@ -43,9 +45,9 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.6.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect - github.com/hashicorp/hcl/v2 v2.20.1 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/hc-install v0.8.0 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect @@ -65,18 +67,18 @@ require ( github.com/oklog/run v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/posener/complete v1.2.3 // indirect - github.com/russross/blackfriday v1.6.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/yuin/goldmark v1.6.0 // indirect + github.com/yuin/goldmark v1.7.1 // indirect github.com/yuin/goldmark-meta v1.1.0 // indirect - github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty v1.15.0 // indirect + go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect - golang.org/x/mod v0.17.0 // indirect + golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect diff --git a/go.sum b/go.sum index 6764b8ed..c31c03ea 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0= github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc= 
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -21,6 +23,8 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= +github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= @@ -87,20 +91,20 @@ github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFO github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.8.0 h1:LdpZeXkZYMQhoKPCecJHlKvUkQFixN/nvyR1CdfOLjI= +github.com/hashicorp/hc-install v0.8.0/go.mod h1:+MwJYjDfCruSD/udvBmRB22Nlkwwkwf5sAB6uTIhSaU= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-docs v0.18.0 h1:2bINhzXc+yDeAcafurshCrIjtdu1XHn9zZ3ISuEhgpk= -github.com/hashicorp/terraform-plugin-docs v0.18.0/go.mod h1:iIUfaJpdUmpi+rI42Kgq+63jAjI8aZVTyxp3Bvk9Hg8= +github.com/hashicorp/terraform-plugin-docs v0.19.4 h1:G3Bgo7J22OMtegIgn8Cd/CaSeyEljqjH3G39w28JK4c= +github.com/hashicorp/terraform-plugin-docs v0.19.4/go.mod h1:4pLASsatTmRynVzsjEhbXZ6s7xBlUw/2Kt0zfrq8HxA= github.com/hashicorp/terraform-plugin-framework v1.11.0 h1:M7+9zBArexHFXDx/pKTxjE6n/2UCXY6b8FIq9ZYhwfE= github.com/hashicorp/terraform-plugin-framework v1.11.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= 
github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= @@ -111,8 +115,8 @@ github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9T github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= -github.com/hashicorp/terraform-plugin-testing v1.7.0 h1:I6aeCyZ30z4NiI3tzyDoO6fS7YxP5xSL1ceOon3gTe8= -github.com/hashicorp/terraform-plugin-testing v1.7.0/go.mod h1:sbAreCleJNOCz+y5vVHV8EJkIWZKi/t4ndKiUjM9vao= +github.com/hashicorp/terraform-plugin-testing v1.10.0 h1:2+tmRNhvnfE4Bs8rB6v58S/VpqzGC6RCh9Y8ujdn+aw= +github.com/hashicorp/terraform-plugin-testing v1.10.0/go.mod h1:iWRW3+loP33WMch2P/TEyCxxct/ZEcCGMquSLSCVsrc= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -171,8 +175,6 @@ github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXq github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -201,14 +203,16 @@ github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.6.0 h1:boZcn2GTjpsynOsC0iJHnBWa4Bi0qzfJjthwauItG68= -github.com/yuin/goldmark v1.6.0/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc= github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod 
h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= +go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -219,8 +223,8 @@ golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaU golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 00000000..fe79e3ad --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 00000000..01b57432 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
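The go.mod and go.sum hunks above pin the upgraded dependency set (terraform-plugin-docs v0.19.4, terraform-plugin-testing v1.10.0, and their new transitive dependencies such as BurntSushi/toml and doublestar). The exact commands used aren't recorded in the patch, but a vendor refresh like this is conventionally produced with the standard module tooling, sketched here:

    % go get github.com/hashicorp/terraform-plugin-docs@v0.19.4
    % go get github.com/hashicorp/terraform-plugin-testing@v1.10.0
    % go mod tidy
    % go mod vendor

`go mod vendor` rewrites the vendor/ tree and vendor/modules.txt, which is where the bulk of this diff comes from.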
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 00000000..3651cfa9
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,120 @@
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml` packages.
+
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+
+Documentation: https://godocs.io/github.com/BurntSushi/toml
+
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
+
+This library requires Go 1.13 or newer; add it to your go.mod with:
+
+    % go get github.com/BurntSushi/toml@latest
+
+It also comes with a TOML validator CLI tool:
+
+    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+    % tomlv some-toml-file.toml
+
+### Examples
+For the simplest example, consider some TOML file as just a list of keys and
+values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which can be decoded with:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time
+}
+
+var conf Config
+_, err := toml.Decode(tomlData, &conf)
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+	ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like other decoders, **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
+
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses values in a `mail.Address`:
+
+```toml
+contacts = [
+    "Donald Duck <donald@duckburg.com>",
+    "Scrooge McDuck <scrooge@duckburg.com>",
+]
+```
+
+Can be decoded with:
+
+```go
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+	*mail.Address
+}
+
+func (a *address) UnmarshalText(text []byte) error {
+	var err error
+	a.Address, err = mail.ParseAddress(string(text))
+	return err
+}
+
+// Decode it.
+func decode() {
+	blob := `
+		contacts = [
+			"Donald Duck <donald@duckburg.com>",
+			"Scrooge McDuck <scrooge@duckburg.com>",
+		]
+	`
+
+	var contacts struct {
+		Contacts []address
+	}
+
+	_, err := toml.Decode(blob, &contacts)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, c := range contacts.Contacts {
+		fmt.Printf("%#v\n", c.Address)
+	}
+
+	// Output:
+	// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+	// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
+}
+```
+
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
+
+### More complex usage
+See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 00000000..0ca1dc4f
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,602 @@
+package toml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v interface{}) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) + return err +} + +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. +// +// ### Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. 
+type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. 
+ context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). + + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := md.unify(v, indirect(rvval)) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := 
indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +func (md *MetaData) badtype(dst string, data interface{}) error { + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). 
+func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { + return true + } + return false +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 00000000..086d0b68 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 00000000..c6af3f23 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,21 @@ +package toml + +import ( + "encoding" + "io" +) + +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 00000000..81a7c0fe --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,11 @@ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// There is also support for delaying decoding with the Primitive type, and +// querying the set of keys in a TOML document with the MetaData type. +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. 
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 00000000..930e1d52
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,750 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var dblQuotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+var (
+	marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	timeType    = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for [Decode].
+//
+// time.Time is encoded as an RFC 3339 string, and time.Duration as its string
+// representation.
+//
+// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
+// encode the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// The toml struct tag can be used to provide the key name; if omitted the
+// struct field name will be used. If the "omitempty" option is present the
+// following value will be skipped:
+//
+//   - arrays, slices, maps, and string with len of 0
+//   - struct with all zero values
+//   - bool false
+//
+// If omitzero is given, all int and float types with a value of 0 will be
+// skipped.
+//
+// Encoding Go values without a corresponding TOML representation will return
+// an error. Examples of this include maps with non-string keys, slices with
+// nil elements, embedded non-struct types, and nested slices containing maps
+// or structs. (e.g. [][]map[string]string is not allowed but
+// []map[string]string is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// String to use for a single indentation level; default is two spaces.
+ Indent string + + w *bufio.Writer + hasWritten bool // written any output to w yet? +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. 
+ enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
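+	// For example (illustrative): map[string]interface{}{"b": 1, "a": map[string]int{"c": 2}}
+	// encodes 'b = 1' before the '[a]' table, so that 'b' can't be mistaken
+	// for a key of [a].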
+ var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + if is32Bit { + copyStart := make([]int, len(start)) + copy(copyStart, start) + fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
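+				// e.g. a nil *string or nil map field is omitted entirely,
+				// matching how nil map values are skipped in eMap above.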
+ continue + } + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitempty && enc.isEmpty(fieldVal) { + continue + } + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if isTableArray(rv) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + default: + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false + } + + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. 
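+		// (For example, []map[string]string{{"a": "b"}, nil} has no TOML
+		// representation, so it panics with errArrayNilElement.)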
+		if tt == nil {
+			encPanic(errArrayNilElement)
+		}
+
+		if ret && !typeEqual(tomlHash, tt) {
+			ret = false
+		}
+	}
+	return ret
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func (enc *Encoder) isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Struct:
+		if rv.Type().Comparable() {
+			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		}
+		// Need to also check if all the fields are empty, otherwise something
+		// like this with uncomparable types will always return true:
+		//
+		//   type a struct{ field b }
+		//   type b struct{ s []string }
+		//   s := a{field: b{s: []string{"AAA"}}}
+		for i := 0; i < rv.NumField(); i++ {
+			if !enc.isEmpty(rv.Field(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//	key = <value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+//	┌───────────────────┐
+//	│     ┌───┐  ┌────┐│
+//	v     v   v  v    vv
+//	key = {k = 1, k2 = 2}
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	_, err := fmt.Fprintf(enc.w, format, v...)
+	if err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+// Resolve any level of pointers to the actual value (e.g. **string → string).
+func eindirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
+		if isMarshaler(v) {
+			return v
+		}
+		if v.CanAddr() { /// Special case for marshalers; see #358.
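+			// A MarshalTOML or MarshalText method declared on a pointer
+			// receiver is only in the method set of *T, so an addressable T
+			// must be checked through its address.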
+			if pv := v.Addr(); isMarshaler(pv) {
+				return pv
+			}
+		}
+		return v
+	}
+
+	if v.IsNil() {
+		return v
+	}
+
+	return eindirect(v.Elem())
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
new file mode 100644
index 00000000..f4f390e6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -0,0 +1,279 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML document:
+// invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using [ErrorWithPosition]:
+//
+//	toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+//	At line 4, column 2-7:
+//
+//	      2 | fruit = []
+//	      3 |
+//	      4 | [[fruit]] # Not allowed
+//	            ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+//	toml: error: newlines not allowed within inline tables
+//
+//	At line 1, column 18:
+//
+//	      1 | x = [{ key = 42 #
+//	                         ^
+//
+//	Error help:
+//
+//	  Inline tables must always be on a single line:
+//
+//	      table = {key = 42, second = 43}
+//
+//	  It is invalid to split them over multiple lines like so:
+//
+//	      # INVALID
+//	      table = {
+//	          key = 42,
+//	          second = 43
+//	      }
+//
+//	  Use regular tables for this:
+//
+//	      [table]
+//	      key = 42
+//	      second = 43
type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error.
+	LastKey  string   // Last parsed key; may be blank.
+
+	// Line the error occurred on.
+	//
+	// Deprecated: use [Position].
+	Line int
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+ + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + } + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage() returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case. + col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. 
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+	table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+	# INVALID
+	table = {
+	    key = 42,
+	    second = 43
+	}
+
+Use regular tables for this:
+
+	[table]
+	key = 42
+	second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+	# INVALID
+	string = "Hello,
+	world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+	string = """Hello,
+	world!"""
+`
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be a
+bug in the program that uses too small an integer.
+
+The maximum and minimum values are:
+
+	size   │ lowest         │ highest
+	───────┼────────────────┼───────────────
+	int8   │ -128           │ 127
+	int16  │ -32,768        │ 32,767
+	int32  │ -2,147,483,648 │ 2,147,483,647
+	int64  │ -9.2 × 10¹⁷    │ 9.2 × 10¹⁷
+	uint8  │ 0              │ 255
+	uint16 │ 0              │ 65,535
+	uint32 │ 0              │ 4,294,967,295
+	uint64 │ 0              │ 1.8 × 10¹⁸
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageDuration = `
+A duration must be given as "number<unit>", without any spaces. Valid units are:
+
+	ns         nanoseconds (billionths of a second)
+	us, µs     microseconds (millionths of a second)
+	ms         milliseconds (thousandths of a second)
+	s          seconds
+	m          minutes
+	h          hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 00000000..022f15bc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and is left to the implementation.
+// These default to the current local timezone offset of the computer, but this
+// can be changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
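+//
+// For example (illustrative), a program that wants local datetimes to be
+// interpreted as UTC could set, before decoding:
+//
+//	internal.LocalDatetime = time.UTC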
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 00000000..d4d70871 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1233 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
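+	// (The backup() calls in the multiline-string lexers can leave lx.pos
+	// behind lx.start when the input ends in an incomplete sequence; report
+	// the stray byte as a UTF-8 error rather than emitting a bogus item.)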
+ if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. 
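+//
+// It builds the error with fmt.Errorf and, when the lexer is at EOF, reports
+// the position on the previous line (like errorPrevLine) so the error doesn't
+// point at a blank line.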
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. 
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected key separator '='")
+	case r == '.':
+		lx.ignore()
+		return lexKeyNameStart
+	case r == '=':
+		lx.emit(itemKeyEnd)
+		return lexSkip(lx, lexValue)
+	default:
+		return lx.errorf("expected '.' or '=', but got %q instead", r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
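+	// For example, in 'x = 42' lexKeyEnd hands control here after consuming
+	// the '=', and lexArrayValue re-enters this state for each array element.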
+ r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
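+// For example, in 'point = {x = 1, y = 2}' this state is entered once after
+// the '{' and again after each ','.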
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. 
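+				// (The closing delimiter here is three double quotes; the
+				// two next() calls below finish reading it.)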
+ lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning ''' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. 
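+//
+// For example: '0x1f' continues as a hex integer, '0.5' as a float, and
+// '0001-02-03' as a datetime, all after the same leading '0'.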
+func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. 
It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 00000000..71847a04 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,121 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]interface{} + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. 
+// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 00000000..d2542d6f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,781 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]interface{} // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. 
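+	// ("\xff\xfe" and "\xfe\xff", checked below, are the UTF-16 byte-order
+	// marks.)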
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]interface{}), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. 
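+ /// For example, for the line 'a.b.c = 1' the key parts are ["a", "b", "c"]:
+ /// currentKey becomes "c", while "a" and "a.b" are recorded as implicit
+ /// tables.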
+ context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. 
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. 
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ /// Read all key parts.
+ k := p.nextPos()
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
+
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
+
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
+ }
+
+ /// Set the value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ, it.pos)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[p.currentKey] = val
+
+ /// Restore context.
+ p.context = prevContext
+ }
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+ if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
+ return true
+ }
+ if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+ return true
+ }
+ return false
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+ switch s {
+ case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+ return true
+ }
+ accept := false
+ for _, r := range s {
+ if r == '_' {
+ if !accept {
+ return false
+ }
+ }
+
+ // isHexadecimal is a superset of all the permissible characters
+ // surrounding an underscore.
+ accept = isHexadecimal(r)
+ }
+ return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+ period := false
+ for _, r := range s {
+ if period && !isDigit(r) {
+ return false
+ }
+ period = r == '.'
+ }
+ return !period
+}
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+ // We only need implicit hashes for key[0:-1]
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", key) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// set calls setValue and setType. +func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { + p.setValue(key, val) + p.setType(key, typ, pos) + +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType, pos Position) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). 
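+//
+// An implicit key may later be defined explicitly (e.g. by a subsequent
+// "[a.b]" table), at which point it is removed from the implicits set by
+// removeImplicit.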
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+ p.addImplicit(key)
+ p.addContext(key, false)
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) > 0 && s[0] == '\n' {
+ return s[1:]
+ }
+ if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+ return s[2:]
+ }
+ return s
+}
+
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func (p *parser) stripEscapedNewlines(s string) string {
+ split := strings.Split(s, "\n")
+ if len(split) < 1 {
+ return s
+ }
+
+ escNL := false // Keep track of whether the last non-blank line was escaped.
+ for i, line := range split {
+ line = strings.TrimRight(line, " \t\r")
+
+ if len(line) == 0 || line[len(line)-1] != '\\' {
+ split[i] = strings.TrimRight(split[i], "\r")
+ if !escNL && i != len(split)-1 {
+ split[i] += "\n"
+ }
+ continue
+ }
+
+ escBS := true
+ for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+ escBS = !escBS
+ }
+ if escNL {
+ line = strings.TrimLeft(line, " \t\r")
+ }
+ escNL = !escBS
+
+ if escBS {
+ split[i] += "\n"
+ continue
+ }
+
+ if i == len(split)-1 {
+ p.panicf("invalid escape: '\\ '")
+ }
+
+ split[i] = line[:len(line)-1] // Remove \
+ if len(split)-1 > i {
+ split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+ }
+ }
+ return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(it item, str string) string {
+ replaced := make([]rune, 0, len(str))
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ case ' ', '\t':
+ p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+ // At this point, we know we have a Unicode escape of the form
+ // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 00000000..254ca82e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
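+ // Unexported, non-embedded fields and fields whose tag is `toml:"-"`
+ // (opts.skip) are excluded.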
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
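+// Concurrent callers may duplicate the computation for the same type, but the
+// cache map itself is only read and written under fieldCache's lock.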
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go
new file mode 100644
index 00000000..4e90d773
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -0,0 +1,70 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be gravitating
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsTable(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml b/vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml new file mode 100644 index 00000000..db6e504a --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml @@ -0,0 +1,10 @@ +coverage: + status: + project: + default: + threshold: 1% + patch: + default: + target: 70% +ignore: + - globoptions.go diff --git a/vendor/github.com/bmatcuk/doublestar/v4/.gitignore b/vendor/github.com/bmatcuk/doublestar/v4/.gitignore new file mode 100644 index 00000000..af212ecc --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/.gitignore @@ -0,0 +1,32 @@ +# vi +*~ +*.swp +*.swo + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# test directory +test/ diff --git a/vendor/github.com/bmatcuk/doublestar/v4/LICENSE b/vendor/github.com/bmatcuk/doublestar/v4/LICENSE new file mode 100644 index 00000000..309c9d1d --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Bob Matcuk + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/bmatcuk/doublestar/v4/README.md b/vendor/github.com/bmatcuk/doublestar/v4/README.md new file mode 100644 index 00000000..70117eff --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/README.md @@ -0,0 +1,402 @@ +# doublestar + +Path pattern matching and globbing supporting `doublestar` (`**`) patterns. 
+ +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bmatcuk/doublestar)](https://pkg.go.dev/github.com/bmatcuk/doublestar/v4) +[![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master)](https://github.com/bmatcuk/doublestar/releases) +[![Build Status](https://github.com/bmatcuk/doublestar/actions/workflows/test.yml/badge.svg)](https://github.com/bmatcuk/doublestar/actions) +[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master) +[![Sponsor](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/bmatcuk) + +## About + +#### [Upgrading?](UPGRADING.md) + +**doublestar** is a [golang] implementation of path pattern matching and +globbing with support for "doublestar" (aka globstar: `**`) patterns. + +doublestar patterns match files and directories recursively. For example, if +you had the following directory structure: + +```bash +grandparent +`-- parent + |-- child1 + `-- child2 +``` + +You could find the children with patterns such as: `**/child*`, +`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will +return all files and directories recursively). + +Bash's globstar is doublestar's inspiration and, as such, works similarly. +Note that the doublestar must appear as a path component by itself. A pattern +such as `/path**` is invalid and will be treated the same as `/path*`, but +`/path*/**` should achieve the desired result. Additionally, `/path/**` will +match all directories and files under the path directory, but `/path/**/` will +only match directories. + +v4 is a complete rewrite with a focus on performance. Additionally, +[doublestar] has been updated to use the new [io/fs] package for filesystem +access. As a result, it is only supported by [golang] v1.16+. + +## Installation + +**doublestar** can be installed via `go get`: + +```bash +go get github.com/bmatcuk/doublestar/v4 +``` + +To use it in your code, you must import it: + +```go +import "github.com/bmatcuk/doublestar/v4" +``` + +## Usage + +### ErrBadPattern + +```go +doublestar.ErrBadPattern +``` + +Returned by various functions to report that the pattern is malformed. At the +moment, this value is equal to `path.ErrBadPattern`, but, for portability, this +equivalence should probably not be relied upon. + +### Match + +```go +func Match(pattern, name string) (bool, error) +``` + +Match returns true if `name` matches the file name `pattern` ([see +"patterns"]). `name` and `pattern` are split on forward slash (`/`) characters +and may be relative or absolute. + +Match requires pattern to match all of name, not just a substring. The only +possible returned error is `ErrBadPattern`, when pattern is malformed. + +Note: this is meant as a drop-in replacement for `path.Match()` which always +uses `'/'` as the path separator. If you want to support systems which use a +different path separator (such as Windows), what you want is `PathMatch()`. +Alternatively, you can run `filepath.ToSlash()` on both pattern and name and +then use this function. + +Note: users should _not_ count on the returned error, +`doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. + + +### PathMatch + +```go +func PathMatch(pattern, name string) (bool, error) +``` + +PathMatch returns true if `name` matches the file name `pattern` ([see +"patterns"]). 
The difference between Match and PathMatch is that PathMatch will +automatically use your system's path separator to split `name` and `pattern`. +On systems where the path separator is `'\'`, escaping will be disabled. + +Note: this is meant as a drop-in replacement for `filepath.Match()`. It assumes +that both `pattern` and `name` are using the system's path separator. If you +can't be sure of that, use `filepath.ToSlash()` on both `pattern` and `name`, +and then use the `Match()` function instead. + +### GlobOption + +Options that may be passed to `Glob`, `GlobWalk`, or `FilepathGlob`. Any number +of options may be passed to these functions, and in any order, as the last +argument(s). + +```go +WithFailOnIOErrors() +``` + +If passed, doublestar will abort and return IO errors when encountered. Note +that if the glob pattern references a path that does not exist (such as +`nonexistent/path/*`), this is _not_ considered an IO error: it is considered a +pattern with no matches. + +```go +WithFailOnPatternNotExist() +``` + +If passed, doublestar will abort and return `doublestar.ErrPatternNotExist` if +the pattern references a path that does not exist before any meta characters +such as `nonexistent/path/*`. Note that alts (ie, `{...}`) are expanded before +this check. In other words, a pattern such as `{a,b}/*` may fail if either `a` +or `b` do not exist but `*/{a,b}` will never fail because the star may match +nothing. + +```go +WithFilesOnly() +``` + +If passed, doublestar will only return "files" from `Glob`, `GlobWalk`, or +`FilepathGlob`. In this context, "files" are anything that is not a directory +or a symlink to a directory. + +Note: if combined with the WithNoFollow option, symlinks to directories _will_ +be included in the result since no attempt is made to follow the symlink. + +```go +WithNoFollow() +``` + +If passed, doublestar will not follow symlinks while traversing the filesystem. +However, due to io/fs's _very_ poor support for querying the filesystem about +symlinks, there's a caveat here: if part of the pattern before any meta +characters contains a reference to a symlink, it will be followed. For example, +a pattern such as `path/to/symlink/*` will be followed assuming it is a valid +symlink to a directory. However, from this same example, a pattern such as +`path/to/**` will not traverse the `symlink`, nor would `path/*/symlink/*` + +Note: if combined with the WithFilesOnly option, symlinks to directories _will_ +be included in the result since no attempt is made to follow the symlink. + +### Glob + +```go +func Glob(fsys fs.FS, pattern string, opts ...GlobOption) ([]string, error) +``` + +Glob returns the names of all files matching pattern or nil if there is no +matching file. The syntax of patterns is the same as in `Match()`. The pattern +may describe hierarchical names such as `usr/*/bin/ed`. + +Glob ignores file system errors such as I/O errors reading directories by +default. The only possible returned error is `ErrBadPattern`, reporting that +the pattern is malformed. + +To enable aborting on I/O errors, the `WithFailOnIOErrors` option can be +passed. + +Note: this is meant as a drop-in replacement for `io/fs.Glob()`. Like +`io/fs.Glob()`, this function assumes that your pattern uses `/` as the path +separator even if that's not correct for your OS (like Windows). If you aren't +sure if that's the case, you can use `filepath.ToSlash()` on your pattern +before calling `Glob()`. 
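+
+For example, a minimal sketch (the `configs` directory and the `*.yml` file
+layout are hypothetical):
+
+```go
+fsys := os.DirFS("configs")
+matches, err := doublestar.Glob(fsys, "**/*.yml")
+```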
+ +Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/` +will return no results and no errors. This seems to be a [conscious +decision](https://github.com/golang/go/issues/44092#issuecomment-774132549), +even if counter-intuitive. You can use [SplitPattern] to divide a pattern into +a base path (to initialize an `FS` object) and pattern. + +Note: users should _not_ count on the returned error, +`doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. + +### GlobWalk + +```go +type GlobWalkFunc func(path string, d fs.DirEntry) error + +func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc, opts ...GlobOption) error +``` + +GlobWalk calls the callback function `fn` for every file matching pattern. The +syntax of pattern is the same as in Match() and the behavior is the same as +Glob(), with regard to limitations (such as patterns containing `/./`, `/../`, +or starting with `/`). The pattern may describe hierarchical names such as +usr/*/bin/ed. + +GlobWalk may have a small performance benefit over Glob if you do not need a +slice of matches because it can avoid allocating memory for the matches. +Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for each +match, and lets you quit early by returning a non-nil error from your callback +function. Like `io/fs.WalkDir`, if your callback returns `SkipDir`, GlobWalk +will skip the current directory. This means that if the current path _is_ a +directory, GlobWalk will not recurse into it. If the current path is not a +directory, the rest of the parent directory will be skipped. + +GlobWalk ignores file system errors such as I/O errors reading directories by +default. GlobWalk may return `ErrBadPattern`, reporting that the pattern is +malformed. + +To enable aborting on I/O errors, the `WithFailOnIOErrors` option can be +passed. + +Additionally, if the callback function `fn` returns an error, GlobWalk will +exit immediately and return that error. + +Like Glob(), this function assumes that your pattern uses `/` as the path +separator even if that's not correct for your OS (like Windows). If you aren't +sure if that's the case, you can use filepath.ToSlash() on your pattern before +calling GlobWalk(). + +Note: users should _not_ count on the returned error, +`doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. + +### FilepathGlob + +```go +func FilepathGlob(pattern string, opts ...GlobOption) (matches []string, err error) +``` + +FilepathGlob returns the names of all files matching pattern or nil if there is +no matching file. The syntax of pattern is the same as in Match(). The pattern +may describe hierarchical names such as usr/*/bin/ed. + +FilepathGlob ignores file system errors such as I/O errors reading directories +by default. The only possible returned error is `ErrBadPattern`, reporting that +the pattern is malformed. + +To enable aborting on I/O errors, the `WithFailOnIOErrors` option can be +passed. + +Note: FilepathGlob is a convenience function that is meant as a drop-in +replacement for `path/filepath.Glob()` for users who don't need the +complication of io/fs. Basically, it: + +* Runs `filepath.Clean()` and `ToSlash()` on the pattern +* Runs `SplitPattern()` to get a base path and a pattern to Glob +* Creates an FS object from the base path and `Glob()s` on the pattern +* Joins the base path with all of the matches from `Glob()` + +Returned paths will use the system's path separator, just like +`filepath.Glob()`. 
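+
+For example (a sketch; the pattern shown is illustrative):
+
+```go
+matches, err := doublestar.FilepathGlob("../assets/**/*.png")
+```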
+ +Note: the returned error `doublestar.ErrBadPattern` is not equal to +`filepath.ErrBadPattern`. + +### SplitPattern + +```go +func SplitPattern(p string) (base, pattern string) +``` + +SplitPattern is a utility function. Given a pattern, SplitPattern will return +two strings: the first string is everything up to the last slash (`/`) that +appears _before_ any unescaped "meta" characters (ie, `*?[{`). The second +string is everything after that slash. For example, given the pattern: + +``` +../../path/to/meta*/** + ^----------- split here +``` + +SplitPattern returns "../../path/to" and "meta*/**". This is useful for +initializing os.DirFS() to call Glob() because Glob() will silently fail if +your pattern includes `/./` or `/../`. For example: + +```go +base, pattern := SplitPattern("../../path/to/meta*/**") +fsys := os.DirFS(base) +matches, err := Glob(fsys, pattern) +``` + +If SplitPattern cannot find somewhere to split the pattern (for example, +`meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in this +example). + +Of course, it is your responsibility to decide if the returned base path is +"safe" in the context of your application. Perhaps you could use Match() to +validate against a list of approved base directories? + +### ValidatePattern + +```go +func ValidatePattern(s string) bool +``` + +Validate a pattern. Patterns are validated while they run in Match(), +PathMatch(), and Glob(), so, you normally wouldn't need to call this. However, +there are cases where this might be useful: for example, if your program allows +a user to enter a pattern that you'll run at a later time, you might want to +validate it. + +ValidatePattern assumes your pattern uses '/' as the path separator. + +### ValidatePathPattern + +```go +func ValidatePathPattern(s string) bool +``` + +Like ValidatePattern, only uses your OS path separator. In other words, use +ValidatePattern if you would normally use Match() or Glob(). Use +ValidatePathPattern if you would normally use PathMatch(). Keep in mind, Glob() +requires '/' separators, even if your OS uses something else. + +### Patterns + +**doublestar** supports the following special terms in the patterns: + +Special Terms | Meaning +------------- | ------- +`*` | matches any sequence of non-path-separators +`/**/` | matches zero or more directories +`?` | matches any single non-path-separator character +`[class]` | matches any single non-path-separator character against a class of characters ([see "character classes"]) +`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches + +Any character with a special meaning can be escaped with a backslash (`\`). + +A doublestar (`**`) should appear surrounded by path separators such as `/**/`. +A mid-pattern doublestar (`**`) behaves like bash's globstar option: a pattern +such as `path/to/**.txt` would return the same results as `path/to/*.txt`. The +pattern you're looking for is `path/to/**/*.txt`. 
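+
+For example (a quick sketch of the difference; return values shown in the
+comments, errors discarded):
+
+```go
+doublestar.Match("path/to/*.txt", "path/to/notes.txt")    // true
+doublestar.Match("path/to/**/*.txt", "path/to/a/b/c.txt") // true
+doublestar.Match("path/to/*.txt", "path/to/a/b/c.txt")    // false
+```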
+
+#### Character Classes
+
+Character classes support the following:
+
+Class | Meaning
+---------- | -------
+`[abc]` | matches any single character within the set
+`[a-z]` | matches any single character in the range
+`[^class]` | matches any single character which does *not* match the class
+`[!class]` | same as `^`: negates the class
+
+## Performance
+
+```
+goos: darwin
+goarch: amd64
+pkg: github.com/bmatcuk/doublestar/v4
+cpu: Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz
+BenchmarkMatch-8 285639 3868 ns/op 0 B/op 0 allocs/op
+BenchmarkGoMatch-8 286945 3726 ns/op 0 B/op 0 allocs/op
+BenchmarkPathMatch-8 320511 3493 ns/op 0 B/op 0 allocs/op
+BenchmarkGoPathMatch-8 304236 3434 ns/op 0 B/op 0 allocs/op
+BenchmarkGlob-8 466 2501123 ns/op 190225 B/op 2849 allocs/op
+BenchmarkGlobWalk-8 476 2536293 ns/op 184017 B/op 2750 allocs/op
+BenchmarkGoGlob-8 463 2574836 ns/op 194249 B/op 2929 allocs/op
+```
+
+These benchmarks (in `doublestar_test.go`) compare Match() to path.Match(),
+PathMatch() to filepath.Match(), and Glob() + GlobWalk() to io/fs.Glob(). They
+only run patterns that the standard Go packages can understand as well (so, no
+`{alts}` or `**`) for a fair comparison. Of course, alts and doublestars will
+be less performant than the other pattern meta characters.
+
+Alts are essentially like running multiple patterns, the number of which can
+get large if your pattern has alts nested inside alts. This affects both
+matching (ie, Match()) and globbing (Glob()).
+
+`**` performance in matching is actually pretty similar to a regular `*`, but
+can cause a large number of reads when globbing as it will need to recursively
+traverse your filesystem.
+
+## Sponsors
+I started this project in 2014 in my spare time and have been maintaining it
+ever since. In that time, it has grown into one of the most popular globbing
+libraries in the Go ecosystem. So, if **doublestar** is a useful library in
+your project, consider [sponsoring] my work! I'd really appreciate it!
+
+Thanks for sponsoring me!
+
+## License
+
+[MIT License](LICENSE)
+
+[SplitPattern]: #splitpattern
+[doublestar]: https://github.com/bmatcuk/doublestar
+[golang]: http://golang.org/
+[io/fs]: https://pkg.go.dev/io/fs
+[see "character classes"]: #character-classes
+[see "patterns"]: #patterns
+[sponsoring]: https://github.com/sponsors/bmatcuk
diff --git a/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md b/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md
new file mode 100644
index 00000000..25aace3d
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md
@@ -0,0 +1,63 @@
+# Upgrading from v3 to v4
+
+v4 is a complete rewrite with a focus on performance. Additionally,
+[doublestar] has been updated to use the new [io/fs] package for filesystem
+access. As a result, it is only supported by [golang] v1.16+.
+
+`Match()` and `PathMatch()` mostly did not change, besides big performance
+improvements. Their API is the same. However, note the following corner cases:
+
+* In previous versions of [doublestar], `PathMatch()` could accept patterns
+ that used either platform-specific path separators, or `/`. This was
+ undocumented and didn't match `filepath.Match()`. In v4, both `pattern` and
+ `name` must be using appropriate path separators for the platform. You can
+ use `filepath.FromSlash()` to change `/` to platform-specific separators if
+ you aren't sure.
+* In previous versions of [doublestar], a pattern such as `path/to/a/**` would
+ _not_ match `path/to/a`.
In v4, this pattern _will_ match because if `a` was + a directory, `Glob()` would return it. In other words, the following returns + true: `Match("path/to/a/**", "path/to/a")` + +`Glob()` changed from using a [doublestar]-specific filesystem abstraction (the +`OS` interface) to the [io/fs] package. As a result, it now takes a `fs.FS` as +its first argument. This change has a couple ramifications: + +* Like `io/fs.Glob`, `pattern` must use a `/` as path separator, even on + platforms that use something else. You can use `filepath.ToSlash()` on your + patterns if you aren't sure. +* Patterns that contain `/./` or `/../` are invalid. The [io/fs] package + rejects them, returning an IO error. Since `Glob()` ignores IO errors, it'll + end up being silently rejected. You can run `path.Clean()` to ensure they are + removed from the pattern. + +v4 also added a `GlobWalk()` function that is slightly more performant than +`Glob()` if you just need to iterate over the results and don't need a string +slice. You also get `fs.DirEntry` objects for each result, and can quit early +if your callback returns an error. + +# Upgrading from v2 to v3 + +v3 introduced using `!` to negate character classes, in addition to `^`. If any +of your patterns include a character class that starts with an exclamation mark +(ie, `[!...]`), you'll need to update the pattern to escape or move the +exclamation mark. Note that, like the caret (`^`), it only negates the +character class if it is the first character in the character class. + +# Upgrading from v1 to v2 + +The change from v1 to v2 was fairly minor: the return type of the `Open` method +on the `OS` interface was changed from `*os.File` to `File`, a new interface +exported by doublestar. The new `File` interface only defines the functionality +doublestar actually needs (`io.Closer` and `Readdir`), making it easier to use +doublestar with [go-billy], [afero], or something similar. If you were using +this functionality, updating should be as easy as updating `Open's` return +type, since `os.File` already implements `doublestar.File`. + +If you weren't using this functionality, updating should be as easy as changing +your dependencies to point to v2. + +[afero]: https://github.com/spf13/afero +[doublestar]: https://github.com/bmatcuk/doublestar +[go-billy]: https://github.com/src-d/go-billy +[golang]: http://golang.org/ +[io/fs]: https://golang.org/pkg/io/fs/ diff --git a/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go b/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go new file mode 100644 index 00000000..210fd40c --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go @@ -0,0 +1,13 @@ +package doublestar + +import ( + "errors" + "path" +) + +// ErrBadPattern indicates a pattern was malformed. +var ErrBadPattern = path.ErrBadPattern + +// ErrPatternNotExist indicates that the pattern passed to Glob, GlobWalk, or +// FilepathGlob references a path that does not exist. +var ErrPatternNotExist = errors.New("pattern does not exist") diff --git a/vendor/github.com/bmatcuk/doublestar/v4/glob.go b/vendor/github.com/bmatcuk/doublestar/v4/glob.go new file mode 100644 index 00000000..519601b1 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/glob.go @@ -0,0 +1,473 @@ +package doublestar + +import ( + "errors" + "io/fs" + "path" +) + +// Glob returns the names of all files matching pattern or nil if there is no +// matching file. The syntax of pattern is the same as in Match(). 
The pattern +// may describe hierarchical names such as usr/*/bin/ed. +// +// Glob ignores file system errors such as I/O errors reading directories by +// default. The only possible returned error is ErrBadPattern, reporting that +// the pattern is malformed. +// +// To enable aborting on I/O errors, the WithFailOnIOErrors option can be +// passed. +// +// Note: this is meant as a drop-in replacement for io/fs.Glob(). Like +// io/fs.Glob(), this function assumes that your pattern uses `/` as the path +// separator even if that's not correct for your OS (like Windows). If you +// aren't sure if that's the case, you can use filepath.ToSlash() on your +// pattern before calling Glob(). +// +// Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/` +// will return no results and no errors. You can use SplitPattern to divide a +// pattern into a base path (to initialize an `FS` object) and pattern. +// +// Note: users should _not_ count on the returned error, +// doublestar.ErrBadPattern, being equal to path.ErrBadPattern. +// +func Glob(fsys fs.FS, pattern string, opts ...GlobOption) ([]string, error) { + if !ValidatePattern(pattern) { + return nil, ErrBadPattern + } + + g := newGlob(opts...) + + if hasMidDoubleStar(pattern) { + // If the pattern has a `**` anywhere but the very end, GlobWalk is more + // performant because it can get away with less allocations. If the pattern + // ends in a `**`, both methods are pretty much the same, but Glob has a + // _very_ slight advantage because of lower function call overhead. + var matches []string + err := g.doGlobWalk(fsys, pattern, true, true, func(p string, d fs.DirEntry) error { + matches = append(matches, p) + return nil + }) + return matches, err + } + return g.doGlob(fsys, pattern, nil, true, true) +} + +// Does the actual globbin' +// - firstSegment is true if we're in the first segment of the pattern, ie, +// the right-most part where we can match files. If it's false, we're +// somewhere in the middle (or at the beginning) and can only match +// directories since there are path segments above us. +// - beforeMeta is true if we're exploring segments before any meta +// characters, ie, in a pattern such as `path/to/file*.txt`, the `path/to/` +// bit does not contain any meta characters. +func (g *glob) doGlob(fsys fs.FS, pattern string, m []string, firstSegment, beforeMeta bool) (matches []string, err error) { + matches = m + patternStart := indexMeta(pattern) + if patternStart == -1 { + // pattern doesn't contain any meta characters - does a file matching the + // pattern exist? + // The pattern may contain escaped wildcard characters for an exact path match. + path := unescapeMeta(pattern) + pathInfo, pathExists, pathErr := g.exists(fsys, path, beforeMeta) + if pathErr != nil { + return nil, pathErr + } + + if pathExists && (!firstSegment || !g.filesOnly || !pathInfo.IsDir()) { + matches = append(matches, path) + } + + return + } + + dir := "." + splitIdx := lastIndexSlashOrAlt(pattern) + if splitIdx != -1 { + if pattern[splitIdx] == '}' { + openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx]) + if openingIdx == -1 { + // if there's no matching opening index, technically Match() will treat + // an unmatched `}` as nothing special, so... we will, too! 
+ splitIdx = lastIndexSlash(pattern[:splitIdx]) + if splitIdx != -1 { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } else { + // otherwise, we have to handle the alts: + return g.globAlts(fsys, pattern, openingIdx, splitIdx, matches, firstSegment, beforeMeta) + } + } else { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } + + // if `splitIdx` is less than `patternStart`, we know `dir` has no meta + // characters. They would be equal if they are both -1, which means `dir` + // will be ".", and we know that doesn't have meta characters either. + if splitIdx <= patternStart { + return g.globDir(fsys, dir, pattern, matches, firstSegment, beforeMeta) + } + + var dirs []string + dirs, err = g.doGlob(fsys, dir, matches, false, beforeMeta) + if err != nil { + return + } + for _, d := range dirs { + matches, err = g.globDir(fsys, d, pattern, matches, firstSegment, false) + if err != nil { + return + } + } + + return +} + +// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the +// indexes of `{` and `}`, respectively +func (g *glob) globAlts(fsys fs.FS, pattern string, openingIdx, closingIdx int, m []string, firstSegment, beforeMeta bool) (matches []string, err error) { + matches = m + + var dirs []string + startIdx := 0 + afterIdx := closingIdx + 1 + splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx]) + if splitIdx == -1 || pattern[splitIdx] == '}' { + // no common prefix + dirs = []string{""} + } else { + // our alts have a common prefix that we can process first + dirs, err = g.doGlob(fsys, pattern[:splitIdx], matches, false, beforeMeta) + if err != nil { + return + } + + startIdx = splitIdx + 1 + } + + for _, d := range dirs { + patIdx := openingIdx + 1 + altResultsStartIdx := len(matches) + thisResultStartIdx := altResultsStartIdx + for patIdx < closingIdx { + nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true) + if nextIdx == -1 { + nextIdx = closingIdx + } else { + nextIdx += patIdx + } + + alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx) + matches, err = g.doGlob(fsys, alt, matches, firstSegment, beforeMeta) + if err != nil { + return + } + + matchesLen := len(matches) + if altResultsStartIdx != thisResultStartIdx && thisResultStartIdx != matchesLen { + // Alts can result in matches that aren't sorted, or, worse, duplicates + // (consider the trivial pattern `path/to/{a,*}`). Since doGlob returns + // sorted results, we can do a sort of in-place merge and remove + // duplicates. 
But, we only need to do this if this isn't the first alt + // (ie, `altResultsStartIdx != thisResultsStartIdx`) and if the latest + // alt actually added some matches (`thisResultStartIdx != + // len(matches)`) + matches = sortAndRemoveDups(matches, altResultsStartIdx, thisResultStartIdx, matchesLen) + + // length of matches may have changed + thisResultStartIdx = len(matches) + } else { + thisResultStartIdx = matchesLen + } + + patIdx = nextIdx + 1 + } + } + + return +} + +// find files/subdirectories in the given `dir` that match `pattern` +func (g *glob) globDir(fsys fs.FS, dir, pattern string, matches []string, canMatchFiles, beforeMeta bool) (m []string, e error) { + m = matches + + if pattern == "" { + if !canMatchFiles || !g.filesOnly { + // pattern can be an empty string if the original pattern ended in a + // slash, in which case, we should just return dir, but only if it + // actually exists and it's a directory (or a symlink to a directory) + _, isDir, err := g.isPathDir(fsys, dir, beforeMeta) + if err != nil { + return nil, err + } + if isDir { + m = append(m, dir) + } + } + return + } + + if pattern == "**" { + return g.globDoubleStar(fsys, dir, m, canMatchFiles, beforeMeta) + } + + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + e = g.handlePatternNotExist(beforeMeta) + } else { + e = g.forwardErrIfFailOnIOErrors(err) + } + return + } + + var matched bool + for _, info := range dirs { + name := info.Name() + matched, e = matchWithSeparator(pattern, name, '/', false) + if e != nil { + return + } + if matched { + matched = canMatchFiles + if !matched || g.filesOnly { + matched, e = g.isDir(fsys, dir, name, info) + if e != nil { + return + } + if canMatchFiles { + // if we're here, it's because g.filesOnly + // is set and we don't want directories + matched = !matched + } + } + if matched { + m = append(m, path.Join(dir, name)) + } + } + } + + return +} + +func (g *glob) globDoubleStar(fsys fs.FS, dir string, matches []string, canMatchFiles, beforeMeta bool) ([]string, error) { + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return matches, g.handlePatternNotExist(beforeMeta) + } else { + return matches, g.forwardErrIfFailOnIOErrors(err) + } + } + + if !g.filesOnly { + // `**` can match *this* dir, so add it + matches = append(matches, dir) + } + + for _, info := range dirs { + name := info.Name() + isDir, err := g.isDir(fsys, dir, name, info) + if err != nil { + return nil, err + } + if isDir { + matches, err = g.globDoubleStar(fsys, path.Join(dir, name), matches, canMatchFiles, false) + if err != nil { + return nil, err + } + } else if canMatchFiles { + matches = append(matches, path.Join(dir, name)) + } + } + + return matches, nil +} + +// Returns true if the pattern has a doublestar in the middle of the pattern. +// In this case, GlobWalk is faster because it can get away with less +// allocations. However, Glob has a _very_ slight edge if the pattern ends in +// `**`. +func hasMidDoubleStar(p string) bool { + // subtract 3: 2 because we want to return false if the pattern ends in `**` + // (Glob is _very_ slightly faster in that case), and the extra 1 because our + // loop checks p[i] and p[i+1]. + l := len(p) - 3 + for i := 0; i < l; i++ { + if p[i] == '\\' { + // escape next byte + i++ + } else if p[i] == '*' && p[i+1] == '*' { + return true + } + } + return false +} + +// Returns the index of the first unescaped meta character, or negative 1. 
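+// For example (editor's note): indexMeta("path/to/*.go") returns 8 (the `*`),
+// while indexMeta(`path\*.go`) returns -1 because the star is escaped.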
+func indexMeta(s string) int { + var c byte + l := len(s) + for i := 0; i < l; i++ { + c = s[i] + if c == '*' || c == '?' || c == '[' || c == '{' { + return i + } else if c == '\\' { + // skip next byte + i++ + } + } + return -1 +} + +// Returns the index of the last unescaped slash or closing alt (`}`) in the +// string, or negative 1. +func lastIndexSlashOrAlt(s string) int { + for i := len(s) - 1; i >= 0; i-- { + if (s[i] == '/' || s[i] == '}') && (i == 0 || s[i-1] != '\\') { + return i + } + } + return -1 +} + +// Returns the index of the last unescaped slash in the string, or negative 1. +func lastIndexSlash(s string) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == '/' && (i == 0 || s[i-1] != '\\') { + return i + } + } + return -1 +} + +// Assuming the byte after the end of `s` is a closing `}`, this function will +// find the index of the matching `{`. That is, it'll skip over any nested `{}` +// and account for escaping. +func indexMatchedOpeningAlt(s string) int { + alts := 1 + for i := len(s) - 1; i >= 0; i-- { + if s[i] == '}' && (i == 0 || s[i-1] != '\\') { + alts++ + } else if s[i] == '{' && (i == 0 || s[i-1] != '\\') { + if alts--; alts == 0 { + return i + } + } + } + return -1 +} + +// Returns true if the path exists +func (g *glob) exists(fsys fs.FS, name string, beforeMeta bool) (fs.FileInfo, bool, error) { + // name might end in a slash, but Stat doesn't like that + namelen := len(name) + if namelen > 1 && name[namelen-1] == '/' { + name = name[:namelen-1] + } + + info, err := fs.Stat(fsys, name) + if errors.Is(err, fs.ErrNotExist) { + return nil, false, g.handlePatternNotExist(beforeMeta) + } + return info, err == nil, g.forwardErrIfFailOnIOErrors(err) +} + +// Returns true if the path exists and is a directory or a symlink to a +// directory +func (g *glob) isPathDir(fsys fs.FS, name string, beforeMeta bool) (fs.FileInfo, bool, error) { + info, err := fs.Stat(fsys, name) + if errors.Is(err, fs.ErrNotExist) { + return nil, false, g.handlePatternNotExist(beforeMeta) + } + return info, err == nil && info.IsDir(), g.forwardErrIfFailOnIOErrors(err) +} + +// Returns whether or not the given DirEntry is a directory. If the DirEntry +// represents a symbolic link, the link is followed by running fs.Stat() on +// `path.Join(dir, name)` (if dir is "", name will be used without joining) +func (g *glob) isDir(fsys fs.FS, dir, name string, info fs.DirEntry) (bool, error) { + if !g.noFollow && (info.Type()&fs.ModeSymlink) > 0 { + p := name + if dir != "" { + p = path.Join(dir, name) + } + finfo, err := fs.Stat(fsys, p) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // this function is only ever called while expanding a glob, so it can + // never return ErrPatternNotExist + return false, nil + } + return false, g.forwardErrIfFailOnIOErrors(err) + } + return finfo.IsDir(), nil + } + return info.IsDir(), nil +} + +// Builds a string from an alt +func buildAlt(prefix, pattern string, startIdx, openingIdx, currentIdx, nextIdx, afterIdx int) string { + // pattern: + // ignored/start{alts,go,here}remaining - len = 36 + // | | | | ^--- afterIdx = 27 + // | | | \--------- nextIdx = 21 + // | | \----------- currentIdx = 19 + // | \----------------- openingIdx = 13 + // \---------------------- startIdx = 8 + // + // result: + // prefix/startgoremaining - len = 7 + 5 + 2 + 9 = 23 + var buf []byte + patLen := len(pattern) + size := (openingIdx - startIdx) + (nextIdx - currentIdx) + (patLen - afterIdx) + if prefix != "" && prefix != "." 
{
+		buf = make([]byte, 0, size+len(prefix)+1)
+		buf = append(buf, prefix...)
+		buf = append(buf, '/')
+	} else {
+		buf = make([]byte, 0, size)
+	}
+	buf = append(buf, pattern[startIdx:openingIdx]...)
+	buf = append(buf, pattern[currentIdx:nextIdx]...)
+	if afterIdx < patLen {
+		buf = append(buf, pattern[afterIdx:]...)
+	}
+	return string(buf)
+}
+
+// Running alts can produce results that are not sorted, and, worse, can cause
+// duplicates (consider the trivial pattern `path/to/{a,*}`). Since we know
+// each run of doGlob is sorted, we can basically do the "merge" step of a
+// merge sort in-place.
+func sortAndRemoveDups(matches []string, idx1, idx2, l int) []string {
+	var tmp string
+	for ; idx1 < idx2; idx1++ {
+		if matches[idx1] < matches[idx2] {
+			// order is correct
+			continue
+		} else if matches[idx1] > matches[idx2] {
+			// need to swap and then re-sort matches above idx2
+			tmp = matches[idx1]
+			matches[idx1] = matches[idx2]
+
+			shft := idx2 + 1
+			for ; shft < l && matches[shft] < tmp; shft++ {
+				matches[shft-1] = matches[shft]
+			}
+			matches[shft-1] = tmp
+		} else {
+			// duplicate - shift matches above idx2 down one and decrement l
+			for shft := idx2 + 1; shft < l; shft++ {
+				matches[shft-1] = matches[shft]
+			}
+			if l--; idx2 == l {
+				// nothing left to do... matches[idx2:] must have been full of dups
+				break
+			}
+		}
+	}
+	return matches[:l]
+}
diff --git a/vendor/github.com/bmatcuk/doublestar/v4/globoptions.go b/vendor/github.com/bmatcuk/doublestar/v4/globoptions.go
new file mode 100644
index 00000000..9483c4bb
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/v4/globoptions.go
@@ -0,0 +1,144 @@
+package doublestar
+
+import "strings"
+
+// glob is an internal type to store options during globbing.
+type glob struct {
+	failOnIOErrors        bool
+	failOnPatternNotExist bool
+	filesOnly             bool
+	noFollow              bool
+}
+
+// GlobOption represents a setting that can be passed to Glob, GlobWalk, and
+// FilepathGlob.
+type GlobOption func(*glob)
+
+// Construct a new glob object with the given options
+func newGlob(opts ...GlobOption) *glob {
+	g := &glob{}
+	for _, opt := range opts {
+		opt(g)
+	}
+	return g
+}
+
+// WithFailOnIOErrors is an option that can be passed to Glob, GlobWalk, or
+// FilepathGlob. If passed, doublestar will abort and return IO errors when
+// encountered. Note that if the glob pattern references a path that does not
+// exist (such as `nonexistent/path/*`), this is _not_ considered an IO error:
+// it is considered a pattern with no matches.
+//
+func WithFailOnIOErrors() GlobOption {
+	return func(g *glob) {
+		g.failOnIOErrors = true
+	}
+}
+
+// WithFailOnPatternNotExist is an option that can be passed to Glob, GlobWalk,
+// or FilepathGlob. If passed, doublestar will abort and return
+// ErrPatternNotExist if the pattern references a path that does not exist
+// before any meta characters such as `nonexistent/path/*`. Note that alts (ie,
+// `{...}`) are expanded before this check. In other words, a pattern such as
+// `{a,b}/*` may fail if either `a` or `b` does not exist, but `*/{a,b}` will
+// never fail because the star may match nothing.
+//
+func WithFailOnPatternNotExist() GlobOption {
+	return func(g *glob) {
+		g.failOnPatternNotExist = true
+	}
+}
+
+// WithFilesOnly is an option that can be passed to Glob, GlobWalk, or
+// FilepathGlob. If passed, doublestar will only return files that match the
+// pattern, not directories.
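+//
+// For example (editor's illustrative sketch, where fsys is any fs.FS):
+//
+//	matches, err := doublestar.Glob(fsys, "cmd/**", doublestar.WithFilesOnly())
+//	// matches lists files under cmd/ recursively, but no directories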
+//
+// Note: if combined with the WithNoFollow option, symlinks to directories
+// _will_ be included in the result since no attempt is made to follow the
+// symlink.
+//
+func WithFilesOnly() GlobOption {
+	return func(g *glob) {
+		g.filesOnly = true
+	}
+}
+
+// WithNoFollow is an option that can be passed to Glob, GlobWalk, or
+// FilepathGlob. If passed, doublestar will not follow symlinks while
+// traversing the filesystem. However, due to io/fs's _very_ poor support for
+// querying the filesystem about symlinks, there's a caveat here: if part of
+// the pattern before any meta characters contains a reference to a symlink, it
+// will be followed. For example, a pattern such as `path/to/symlink/*` will be
+// followed assuming it is a valid symlink to a directory. However, from this
+// same example, a pattern such as `path/to/**` will not traverse the
+// `symlink`, nor would `path/*/symlink/*`.
+//
+// Note: if combined with the WithFilesOnly option, symlinks to directories
+// _will_ be included in the result since no attempt is made to follow the
+// symlink.
+//
+func WithNoFollow() GlobOption {
+	return func(g *glob) {
+		g.noFollow = true
+	}
+}
+
+// forwardErrIfFailOnIOErrors is used to wrap the return values of I/O
+// functions. When failOnIOErrors is enabled, it will return err; otherwise, it
+// always returns nil.
+//
+func (g *glob) forwardErrIfFailOnIOErrors(err error) error {
+	if g.failOnIOErrors {
+		return err
+	}
+	return nil
+}
+
+// handlePatternNotExist handles fs.ErrNotExist errors. If
+// WithFailOnPatternNotExist has been enabled and canFail is true, this will
+// return ErrPatternNotExist. Otherwise, it will return nil.
+//
+func (g *glob) handlePatternNotExist(canFail bool) error {
+	if canFail && g.failOnPatternNotExist {
+		return ErrPatternNotExist
+	}
+	return nil
+}
+
+// Format options for debugging/testing purposes
+func (g *glob) GoString() string {
+	var b strings.Builder
+	b.WriteString("opts: ")
+
+	hasOpts := false
+	if g.failOnIOErrors {
+		b.WriteString("WithFailOnIOErrors")
+		hasOpts = true
+	}
+	if g.failOnPatternNotExist {
+		if hasOpts {
+			b.WriteString(", ")
+		}
+		b.WriteString("WithFailOnPatternNotExist")
+		hasOpts = true
+	}
+	if g.filesOnly {
+		if hasOpts {
+			b.WriteString(", ")
+		}
+		b.WriteString("WithFilesOnly")
+		hasOpts = true
+	}
+	if g.noFollow {
+		if hasOpts {
+			b.WriteString(", ")
+		}
+		b.WriteString("WithNoFollow")
+		hasOpts = true
+	}
+
+	if !hasOpts {
+		b.WriteString("nil")
+	}
+	return b.String()
+}
diff --git a/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go b/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go
new file mode 100644
index 00000000..84e764f0
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go
@@ -0,0 +1,414 @@
+package doublestar
+
+import (
+	"errors"
+	"io/fs"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+// If returned from GlobWalkFunc, will cause GlobWalk to skip the current
+// directory. In other words, if the current path is a directory, GlobWalk will
+// not recurse into it. Otherwise, GlobWalk will skip the rest of the current
+// directory.
+var SkipDir = fs.SkipDir
+
+// Callback function for GlobWalk(). If the function returns an error, GlobWalk
+// will end immediately and return the same error.
+type GlobWalkFunc func(path string, d fs.DirEntry) error
+
+// GlobWalk calls the callback function `fn` for every file matching pattern.
+// The syntax of pattern is the same as in Match() and the behavior is the same +// as Glob(), with regard to limitations (such as patterns containing `/./`, +// `/../`, or starting with `/`). The pattern may describe hierarchical names +// such as usr/*/bin/ed. +// +// GlobWalk may have a small performance benefit over Glob if you do not need a +// slice of matches because it can avoid allocating memory for the matches. +// Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for +// each match, and lets you quit early by returning a non-nil error from your +// callback function. Like `io/fs.WalkDir`, if your callback returns `SkipDir`, +// GlobWalk will skip the current directory. This means that if the current +// path _is_ a directory, GlobWalk will not recurse into it. If the current +// path is not a directory, the rest of the parent directory will be skipped. +// +// GlobWalk ignores file system errors such as I/O errors reading directories +// by default. GlobWalk may return ErrBadPattern, reporting that the pattern is +// malformed. +// +// To enable aborting on I/O errors, the WithFailOnIOErrors option can be +// passed. +// +// Additionally, if the callback function `fn` returns an error, GlobWalk will +// exit immediately and return that error. +// +// Like Glob(), this function assumes that your pattern uses `/` as the path +// separator even if that's not correct for your OS (like Windows). If you +// aren't sure if that's the case, you can use filepath.ToSlash() on your +// pattern before calling GlobWalk(). +// +// Note: users should _not_ count on the returned error, +// doublestar.ErrBadPattern, being equal to path.ErrBadPattern. +// +func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc, opts ...GlobOption) error { + if !ValidatePattern(pattern) { + return ErrBadPattern + } + + g := newGlob(opts...) + return g.doGlobWalk(fsys, pattern, true, true, fn) +} + +// Actually execute GlobWalk +// - firstSegment is true if we're in the first segment of the pattern, ie, +// the right-most part where we can match files. If it's false, we're +// somewhere in the middle (or at the beginning) and can only match +// directories since there are path segments above us. +// - beforeMeta is true if we're exploring segments before any meta +// characters, ie, in a pattern such as `path/to/file*.txt`, the `path/to/` +// bit does not contain any meta characters. +func (g *glob) doGlobWalk(fsys fs.FS, pattern string, firstSegment, beforeMeta bool, fn GlobWalkFunc) error { + patternStart := indexMeta(pattern) + if patternStart == -1 { + // pattern doesn't contain any meta characters - does a file matching the + // pattern exist? + // The pattern may contain escaped wildcard characters for an exact path match. + path := unescapeMeta(pattern) + info, pathExists, err := g.exists(fsys, path, beforeMeta) + if pathExists && (!firstSegment || !g.filesOnly || !info.IsDir()) { + err = fn(path, dirEntryFromFileInfo(info)) + if err == SkipDir { + err = nil + } + } + return err + } + + dir := "." + splitIdx := lastIndexSlashOrAlt(pattern) + if splitIdx != -1 { + if pattern[splitIdx] == '}' { + openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx]) + if openingIdx == -1 { + // if there's no matching opening index, technically Match() will treat + // an unmatched `}` as nothing special, so... we will, too! 
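+				// (editor's note) same literal-`}` fallback as in doGlob above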
+ splitIdx = lastIndexSlash(pattern[:splitIdx]) + if splitIdx != -1 { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } else { + // otherwise, we have to handle the alts: + return g.globAltsWalk(fsys, pattern, openingIdx, splitIdx, firstSegment, beforeMeta, fn) + } + } else { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } + + // if `splitIdx` is less than `patternStart`, we know `dir` has no meta + // characters. They would be equal if they are both -1, which means `dir` + // will be ".", and we know that doesn't have meta characters either. + if splitIdx <= patternStart { + return g.globDirWalk(fsys, dir, pattern, firstSegment, beforeMeta, fn) + } + + return g.doGlobWalk(fsys, dir, false, beforeMeta, func(p string, d fs.DirEntry) error { + if err := g.globDirWalk(fsys, p, pattern, firstSegment, false, fn); err != nil { + return err + } + return nil + }) +} + +// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the +// indexes of `{` and `}`, respectively +func (g *glob) globAltsWalk(fsys fs.FS, pattern string, openingIdx, closingIdx int, firstSegment, beforeMeta bool, fn GlobWalkFunc) (err error) { + var matches []DirEntryWithFullPath + startIdx := 0 + afterIdx := closingIdx + 1 + splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx]) + if splitIdx == -1 || pattern[splitIdx] == '}' { + // no common prefix + matches, err = g.doGlobAltsWalk(fsys, "", pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, beforeMeta, matches) + if err != nil { + return + } + } else { + // our alts have a common prefix that we can process first + startIdx = splitIdx + 1 + innerBeforeMeta := beforeMeta && !hasMetaExceptAlts(pattern[:splitIdx]) + err = g.doGlobWalk(fsys, pattern[:splitIdx], false, beforeMeta, func(p string, d fs.DirEntry) (e error) { + matches, e = g.doGlobAltsWalk(fsys, p, pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, innerBeforeMeta, matches) + return e + }) + if err != nil { + return + } + } + + skip := "" + for _, m := range matches { + if skip != "" { + // Because matches are sorted, we know that descendants of the skipped + // item must come immediately after the skipped item. If we find an item + // that does not have a prefix matching the skipped item, we know we're + // done skipping. I'm using strings.HasPrefix here because + // filepath.HasPrefix has been marked deprecated (and just calls + // strings.HasPrefix anyway). The reason it's deprecated is because it + // doesn't handle case-insensitive paths, nor does it guarantee that the + // prefix is actually a parent directory. Neither is an issue here: the + // paths come from the system so their cases will match, and we guarantee + // a parent directory by appending a slash to the prefix. + // + // NOTE: m.Path will always use slashes as path separators. 
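+			// (editor's illustration: with skip == "a/b/", a subsequent match
+			// "a/b/c.txt" is skipped, while "a/bc" is not)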
+ if strings.HasPrefix(m.Path, skip) { + continue + } + skip = "" + } + if err = fn(m.Path, m.Entry); err != nil { + if err == SkipDir { + isDir, err := g.isDir(fsys, "", m.Path, m.Entry) + if err != nil { + return err + } + if isDir { + // append a slash to guarantee `skip` will be treated as a parent dir + skip = m.Path + "/" + } else { + // Dir() calls Clean() which calls FromSlash(), so we need to convert + // back to slashes + skip = filepath.ToSlash(filepath.Dir(m.Path)) + "/" + } + err = nil + continue + } + return + } + } + + return +} + +// runs actual matching for alts +func (g *glob) doGlobAltsWalk(fsys fs.FS, d, pattern string, startIdx, openingIdx, closingIdx, afterIdx int, firstSegment, beforeMeta bool, m []DirEntryWithFullPath) (matches []DirEntryWithFullPath, err error) { + matches = m + matchesLen := len(m) + patIdx := openingIdx + 1 + for patIdx < closingIdx { + nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true) + if nextIdx == -1 { + nextIdx = closingIdx + } else { + nextIdx += patIdx + } + + alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx) + err = g.doGlobWalk(fsys, alt, firstSegment, beforeMeta, func(p string, d fs.DirEntry) error { + // insertion sort, ignoring dups + insertIdx := matchesLen + for insertIdx > 0 && matches[insertIdx-1].Path > p { + insertIdx-- + } + if insertIdx > 0 && matches[insertIdx-1].Path == p { + // dup + return nil + } + + // append to grow the slice, then insert + entry := DirEntryWithFullPath{d, p} + matches = append(matches, entry) + for i := matchesLen; i > insertIdx; i-- { + matches[i] = matches[i-1] + } + matches[insertIdx] = entry + matchesLen++ + + return nil + }) + if err != nil { + return + } + + patIdx = nextIdx + 1 + } + + return +} + +func (g *glob) globDirWalk(fsys fs.FS, dir, pattern string, canMatchFiles, beforeMeta bool, fn GlobWalkFunc) (e error) { + if pattern == "" { + if !canMatchFiles || !g.filesOnly { + // pattern can be an empty string if the original pattern ended in a + // slash, in which case, we should just return dir, but only if it + // actually exists and it's a directory (or a symlink to a directory) + info, isDir, err := g.isPathDir(fsys, dir, beforeMeta) + if err != nil { + return err + } + if isDir { + e = fn(dir, dirEntryFromFileInfo(info)) + if e == SkipDir { + e = nil + } + } + } + return + } + + if pattern == "**" { + // `**` can match *this* dir + info, dirExists, err := g.exists(fsys, dir, beforeMeta) + if err != nil { + return err + } + if !dirExists || !info.IsDir() { + return nil + } + if !canMatchFiles || !g.filesOnly { + if e = fn(dir, dirEntryFromFileInfo(info)); e != nil { + if e == SkipDir { + e = nil + } + return + } + } + return g.globDoubleStarWalk(fsys, dir, canMatchFiles, fn) + } + + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return g.handlePatternNotExist(beforeMeta) + } + return g.forwardErrIfFailOnIOErrors(err) + } + + var matched bool + for _, info := range dirs { + name := info.Name() + matched, e = matchWithSeparator(pattern, name, '/', false) + if e != nil { + return + } + if matched { + matched = canMatchFiles + if !matched || g.filesOnly { + matched, e = g.isDir(fsys, dir, name, info) + if e != nil { + return e + } + if canMatchFiles { + // if we're here, it's because g.filesOnly + // is set and we don't want directories + matched = !matched + } + } + if matched { + if e = fn(path.Join(dir, name), info); e != nil { + if e == SkipDir { + e = nil + } + return + } + } + } + } + + return +} + +// 
recursively walk files/directories in a directory +func (g *glob) globDoubleStarWalk(fsys fs.FS, dir string, canMatchFiles bool, fn GlobWalkFunc) (e error) { + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // This function is only ever called after we know the top-most directory + // exists, so, if we ever get here, we know we'll never return + // ErrPatternNotExist. + return nil + } + return g.forwardErrIfFailOnIOErrors(err) + } + + for _, info := range dirs { + name := info.Name() + isDir, err := g.isDir(fsys, dir, name, info) + if err != nil { + return err + } + + if isDir { + p := path.Join(dir, name) + if !canMatchFiles || !g.filesOnly { + // `**` can match *this* dir, so add it + if e = fn(p, info); e != nil { + if e == SkipDir { + e = nil + continue + } + return + } + } + if e = g.globDoubleStarWalk(fsys, p, canMatchFiles, fn); e != nil { + return + } + } else if canMatchFiles { + if e = fn(path.Join(dir, name), info); e != nil { + if e == SkipDir { + e = nil + } + return + } + } + } + + return +} + +type DirEntryFromFileInfo struct { + fi fs.FileInfo +} + +func (d *DirEntryFromFileInfo) Name() string { + return d.fi.Name() +} + +func (d *DirEntryFromFileInfo) IsDir() bool { + return d.fi.IsDir() +} + +func (d *DirEntryFromFileInfo) Type() fs.FileMode { + return d.fi.Mode().Type() +} + +func (d *DirEntryFromFileInfo) Info() (fs.FileInfo, error) { + return d.fi, nil +} + +func dirEntryFromFileInfo(fi fs.FileInfo) fs.DirEntry { + return &DirEntryFromFileInfo{fi} +} + +type DirEntryWithFullPath struct { + Entry fs.DirEntry + Path string +} + +func hasMetaExceptAlts(s string) bool { + var c byte + l := len(s) + for i := 0; i < l; i++ { + c = s[i] + if c == '*' || c == '?' || c == '[' { + return true + } else if c == '\\' { + // skip next byte + i++ + } + } + return false +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/match.go b/vendor/github.com/bmatcuk/doublestar/v4/match.go new file mode 100644 index 00000000..4232c79f --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/match.go @@ -0,0 +1,381 @@ +package doublestar + +import ( + "path/filepath" + "unicode/utf8" +) + +// Match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-path-separators +// '/**/' matches zero or more directories +// '?' matches any single non-path-separator character +// '[' [ '^' '!' ] { character-range } ']' +// character class (must be non-empty) +// starting with `^` or `!` negates the class +// '{' { term } [ ',' { term } ... ] '}' +// alternatives +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match returns true if `name` matches the file name `pattern`. `name` and +// `pattern` are split on forward slash (`/`) characters and may be relative or +// absolute. +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// A doublestar (`**`) should appear surrounded by path separators such as +// `/**/`. A mid-pattern doublestar (`**`) behaves like bash's globstar +// option: a pattern such as `path/to/**.txt` would return the same results as +// `path/to/*.txt`. The pattern you're looking for is `path/to/**/*.txt`. 
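+//
+// For example (editor's sketch):
+//
+//	ok, _ := doublestar.Match("path/to/**/*.txt", "path/to/a/b.txt") // ok == true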
+// +// Note: this is meant as a drop-in replacement for path.Match() which +// always uses '/' as the path separator. If you want to support systems +// which use a different path separator (such as Windows), what you want +// is PathMatch(). Alternatively, you can run filepath.ToSlash() on both +// pattern and name and then use this function. +// +// Note: users should _not_ count on the returned error, +// doublestar.ErrBadPattern, being equal to path.ErrBadPattern. +// +func Match(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, '/', true) +} + +// PathMatch returns true if `name` matches the file name `pattern`. The +// difference between Match and PathMatch is that PathMatch will automatically +// use your system's path separator to split `name` and `pattern`. On systems +// where the path separator is `'\'`, escaping will be disabled. +// +// Note: this is meant as a drop-in replacement for filepath.Match(). It +// assumes that both `pattern` and `name` are using the system's path +// separator. If you can't be sure of that, use filepath.ToSlash() on both +// `pattern` and `name`, and then use the Match() function instead. +// +func PathMatch(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, filepath.Separator, true) +} + +func matchWithSeparator(pattern, name string, separator rune, validate bool) (matched bool, err error) { + return doMatchWithSeparator(pattern, name, separator, validate, -1, -1, -1, -1, 0, 0) +} + +func doMatchWithSeparator(pattern, name string, separator rune, validate bool, doublestarPatternBacktrack, doublestarNameBacktrack, starPatternBacktrack, starNameBacktrack, patIdx, nameIdx int) (matched bool, err error) { + patLen := len(pattern) + nameLen := len(name) + startOfSegment := true +MATCH: + for nameIdx < nameLen { + if patIdx < patLen { + switch pattern[patIdx] { + case '*': + if patIdx++; patIdx < patLen && pattern[patIdx] == '*' { + // doublestar - must begin with a path separator, otherwise we'll + // treat it like a single star like bash + patIdx++ + if startOfSegment { + if patIdx >= patLen { + // pattern ends in `/**`: return true + return true, nil + } + + // doublestar must also end with a path separator, otherwise we're + // just going to treat the doublestar as a single star like bash + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + if patRune == separator { + patIdx += patRuneLen + + doublestarPatternBacktrack = patIdx + doublestarNameBacktrack = nameIdx + starPatternBacktrack = -1 + starNameBacktrack = -1 + continue + } + } + } + startOfSegment = false + + starPatternBacktrack = patIdx + starNameBacktrack = nameIdx + continue + + case '?': + startOfSegment = false + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + if nameRune == separator { + // `?` cannot match the separator + break + } + + patIdx++ + nameIdx += nameRuneLen + continue + + case '[': + startOfSegment = false + if patIdx++; patIdx >= patLen { + // class didn't end + return false, ErrBadPattern + } + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + + matched := false + negate := pattern[patIdx] == '!' 
|| pattern[patIdx] == '^' + if negate { + patIdx++ + } + + if patIdx >= patLen || pattern[patIdx] == ']' { + // class didn't end or empty character class + return false, ErrBadPattern + } + + last := utf8.MaxRune + for patIdx < patLen && pattern[patIdx] != ']' { + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + + // match a range + if last < utf8.MaxRune && patRune == '-' && patIdx < patLen && pattern[patIdx] != ']' { + if pattern[patIdx] == '\\' { + // next character is escaped + patIdx++ + } + patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + + if last <= nameRune && nameRune <= patRune { + matched = true + break + } + + // didn't match range - reset `last` + last = utf8.MaxRune + continue + } + + // not a range - check if the next rune is escaped + if patRune == '\\' { + patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + } + + // check if the rune matches + if patRune == nameRune { + matched = true + break + } + + // no matches yet + last = patRune + } + + if matched == negate { + // failed to match - if we reached the end of the pattern, that means + // we never found a closing `]` + if patIdx >= patLen { + return false, ErrBadPattern + } + break + } + + closingIdx := indexUnescapedByte(pattern[patIdx:], ']', true) + if closingIdx == -1 { + // no closing `]` + return false, ErrBadPattern + } + + patIdx += closingIdx + 1 + nameIdx += nameRuneLen + continue + + case '{': + startOfSegment = false + beforeIdx := patIdx + patIdx++ + closingIdx := indexMatchedClosingAlt(pattern[patIdx:], separator != '\\') + if closingIdx == -1 { + // no closing `}` + return false, ErrBadPattern + } + closingIdx += patIdx + + for { + commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\') + if commaIdx == -1 { + break + } + commaIdx += patIdx + + result, err := doMatchWithSeparator(pattern[:beforeIdx]+pattern[patIdx:commaIdx]+pattern[closingIdx+1:], name, separator, validate, doublestarPatternBacktrack, doublestarNameBacktrack, starPatternBacktrack, starNameBacktrack, beforeIdx, nameIdx) + if result || err != nil { + return result, err + } + + patIdx = commaIdx + 1 + } + return doMatchWithSeparator(pattern[:beforeIdx]+pattern[patIdx:closingIdx]+pattern[closingIdx+1:], name, separator, validate, doublestarPatternBacktrack, doublestarNameBacktrack, starPatternBacktrack, starNameBacktrack, beforeIdx, nameIdx) + + case '\\': + if separator != '\\' { + // next rune is "escaped" in the pattern - literal match + if patIdx++; patIdx >= patLen { + // pattern ended + return false, ErrBadPattern + } + } + fallthrough + + default: + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + if patRune != nameRune { + if separator != '\\' && patIdx > 0 && pattern[patIdx-1] == '\\' { + // if this rune was meant to be escaped, we need to move patIdx + // back to the backslash before backtracking or validating below + patIdx-- + } + break + } + + patIdx += patRuneLen + nameIdx += nameRuneLen + startOfSegment = patRune == separator + continue + } + } + + if starPatternBacktrack >= 0 { + // `*` backtrack, but only if the `name` rune isn't the separator + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[starNameBacktrack:]) + if nameRune != separator { + starNameBacktrack += nameRuneLen + patIdx = starPatternBacktrack + nameIdx = starNameBacktrack + startOfSegment = false + continue + } + } + + if 
doublestarPatternBacktrack >= 0 { + // `**` backtrack, advance `name` past next separator + nameIdx = doublestarNameBacktrack + for nameIdx < nameLen { + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + nameIdx += nameRuneLen + if nameRune == separator { + doublestarNameBacktrack = nameIdx + patIdx = doublestarPatternBacktrack + startOfSegment = true + continue MATCH + } + } + } + + if validate && patIdx < patLen && !doValidatePattern(pattern[patIdx:], separator) { + return false, ErrBadPattern + } + return false, nil + } + + if nameIdx < nameLen { + // we reached the end of `pattern` before the end of `name` + return false, nil + } + + // we've reached the end of `name`; we've successfully matched if we've also + // reached the end of `pattern`, or if the rest of `pattern` can match a + // zero-length string + return isZeroLengthPattern(pattern[patIdx:], separator) +} + +func isZeroLengthPattern(pattern string, separator rune) (ret bool, err error) { + // `/**`, `**/`, and `/**/` are special cases - a pattern such as `path/to/a/**` or `path/to/a/**/` + // *should* match `path/to/a` because `a` might be a directory + if pattern == "" || + pattern == "*" || + pattern == "**" || + pattern == string(separator)+"**" || + pattern == "**"+string(separator) || + pattern == string(separator)+"**"+string(separator) { + return true, nil + } + + if pattern[0] == '{' { + closingIdx := indexMatchedClosingAlt(pattern[1:], separator != '\\') + if closingIdx == -1 { + // no closing '}' + return false, ErrBadPattern + } + closingIdx += 1 + + patIdx := 1 + for { + commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\') + if commaIdx == -1 { + break + } + commaIdx += patIdx + + ret, err = isZeroLengthPattern(pattern[patIdx:commaIdx]+pattern[closingIdx+1:], separator) + if ret || err != nil { + return + } + + patIdx = commaIdx + 1 + } + return isZeroLengthPattern(pattern[patIdx:closingIdx]+pattern[closingIdx+1:], separator) + } + + // no luck - validate the rest of the pattern + if !doValidatePattern(pattern, separator) { + return false, ErrBadPattern + } + return false, nil +} + +// Finds the index of the first unescaped byte `c`, or negative 1. +func indexUnescapedByte(s string, c byte, allowEscaping bool) int { + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == c { + return i + } + } + return -1 +} + +// Assuming the byte before the beginning of `s` is an opening `{`, this +// function will find the index of the matching `}`. That is, it'll skip over +// any nested `{}` and account for escaping +func indexMatchedClosingAlt(s string, allowEscaping bool) int { + alts := 1 + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == '{' { + alts++ + } else if s[i] == '}' { + if alts--; alts == 0 { + return i + } + } + } + return -1 +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/utils.go b/vendor/github.com/bmatcuk/doublestar/v4/utils.go new file mode 100644 index 00000000..0ab1dc98 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/utils.go @@ -0,0 +1,147 @@ +package doublestar + +import ( + "errors" + "os" + "path" + "path/filepath" + "strings" +) + +// SplitPattern is a utility function. Given a pattern, SplitPattern will +// return two strings: the first string is everything up to the last slash +// (`/`) that appears _before_ any unescaped "meta" characters (ie, `*?[{`). 
+// The second string is everything after that slash. For example, given the +// pattern: +// +// ../../path/to/meta*/** +// ^----------- split here +// +// SplitPattern returns "../../path/to" and "meta*/**". This is useful for +// initializing os.DirFS() to call Glob() because Glob() will silently fail if +// your pattern includes `/./` or `/../`. For example: +// +// base, pattern := SplitPattern("../../path/to/meta*/**") +// fsys := os.DirFS(base) +// matches, err := Glob(fsys, pattern) +// +// If SplitPattern cannot find somewhere to split the pattern (for example, +// `meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in +// this example). +// +// Of course, it is your responsibility to decide if the returned base path is +// "safe" in the context of your application. Perhaps you could use Match() to +// validate against a list of approved base directories? +// +func SplitPattern(p string) (base, pattern string) { + base = "." + pattern = p + + splitIdx := -1 + for i := 0; i < len(p); i++ { + c := p[i] + if c == '\\' { + i++ + } else if c == '/' { + splitIdx = i + } else if c == '*' || c == '?' || c == '[' || c == '{' { + break + } + } + + if splitIdx == 0 { + return "/", p[1:] + } else if splitIdx > 0 { + return p[:splitIdx], p[splitIdx+1:] + } + + return +} + +// FilepathGlob returns the names of all files matching pattern or nil if there +// is no matching file. The syntax of pattern is the same as in Match(). The +// pattern may describe hierarchical names such as usr/*/bin/ed. +// +// FilepathGlob ignores file system errors such as I/O errors reading +// directories by default. The only possible returned error is ErrBadPattern, +// reporting that the pattern is malformed. +// +// To enable aborting on I/O errors, the WithFailOnIOErrors option can be +// passed. +// +// Note: FilepathGlob is a convenience function that is meant as a drop-in +// replacement for `path/filepath.Glob()` for users who don't need the +// complication of io/fs. Basically, it: +// - Runs `filepath.Clean()` and `ToSlash()` on the pattern +// - Runs `SplitPattern()` to get a base path and a pattern to Glob +// - Creates an FS object from the base path and `Glob()s` on the pattern +// - Joins the base path with all of the matches from `Glob()` +// +// Returned paths will use the system's path separator, just like +// `filepath.Glob()`. +// +// Note: the returned error doublestar.ErrBadPattern is not equal to +// filepath.ErrBadPattern. +// +func FilepathGlob(pattern string, opts ...GlobOption) (matches []string, err error) { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + base, f := SplitPattern(pattern) + if f == "" || f == "." || f == ".." { + // some special cases to match filepath.Glob behavior + if !ValidatePathPattern(pattern) { + return nil, ErrBadPattern + } + + if filepath.Separator != '\\' { + pattern = unescapeMeta(pattern) + } + + if _, err = os.Lstat(pattern); err != nil { + g := newGlob(opts...) 
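+		// (editor's note) the g built above lets the error handling below
+		// honor WithFailOnPatternNotExist and WithFailOnIOErrors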
+ if errors.Is(err, os.ErrNotExist) { + return nil, g.handlePatternNotExist(true) + } + return nil, g.forwardErrIfFailOnIOErrors(err) + } + return []string{filepath.FromSlash(pattern)}, nil + } + + fs := os.DirFS(base) + if matches, err = Glob(fs, f, opts...); err != nil { + return nil, err + } + for i := range matches { + // use path.Join because we used ToSlash above to ensure our paths are made + // of forward slashes, no matter what the system uses + matches[i] = filepath.FromSlash(path.Join(base, matches[i])) + } + return +} + +// Finds the next comma, but ignores any commas that appear inside nested `{}`. +// Assumes that each opening bracket has a corresponding closing bracket. +func indexNextAlt(s string, allowEscaping bool) int { + alts := 1 + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == '{' { + alts++ + } else if s[i] == '}' { + alts-- + } else if s[i] == ',' && alts == 1 { + return i + } + } + return -1 +} + +var metaReplacer = strings.NewReplacer("\\*", "*", "\\?", "?", "\\[", "[", "\\]", "]", "\\{", "{", "\\}", "}") + +// Unescapes meta characters (*?[]{}) +func unescapeMeta(pattern string) string { + return metaReplacer.Replace(pattern) +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/validate.go b/vendor/github.com/bmatcuk/doublestar/v4/validate.go new file mode 100644 index 00000000..c689b9eb --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/validate.go @@ -0,0 +1,82 @@ +package doublestar + +import "path/filepath" + +// Validate a pattern. Patterns are validated while they run in Match(), +// PathMatch(), and Glob(), so, you normally wouldn't need to call this. +// However, there are cases where this might be useful: for example, if your +// program allows a user to enter a pattern that you'll run at a later time, +// you might want to validate it. +// +// ValidatePattern assumes your pattern uses '/' as the path separator. +// +func ValidatePattern(s string) bool { + return doValidatePattern(s, '/') +} + +// Like ValidatePattern, only uses your OS path separator. In other words, use +// ValidatePattern if you would normally use Match() or Glob(). Use +// ValidatePathPattern if you would normally use PathMatch(). Keep in mind, +// Glob() requires '/' separators, even if your OS uses something else. +// +func ValidatePathPattern(s string) bool { + return doValidatePattern(s, filepath.Separator) +} + +func doValidatePattern(s string, separator rune) bool { + altDepth := 0 + l := len(s) +VALIDATE: + for i := 0; i < l; i++ { + switch s[i] { + case '\\': + if separator != '\\' { + // skip the next byte - return false if there is no next byte + if i++; i >= l { + return false + } + } + continue + + case '[': + if i++; i >= l { + // class didn't end + return false + } + if s[i] == '^' || s[i] == '!' 
{ + i++ + } + if i >= l || s[i] == ']' { + // class didn't end or empty character class + return false + } + + for ; i < l; i++ { + if separator != '\\' && s[i] == '\\' { + i++ + } else if s[i] == ']' { + // looks good + continue VALIDATE + } + } + + // class didn't end + return false + + case '{': + altDepth++ + continue + + case '}': + if altDepth == 0 { + // alt end without a corresponding start + return false + } + altDepth-- + continue + } + } + + // valid as long as all alts are closed + return altDepth == 0 +} diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md index 5f16dd14..6d48174b 100644 --- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -1,3 +1,22 @@ +# 1.7.0 (May 24, 2024) + +ENHANCEMENTS: + +- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91)) +- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133)) + +INTERNAL: + +- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115)) +- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105)) +- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116)) +- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111)) +- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112)) +- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103)) +- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107)) +- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124)) +- update readme ([#104](https://github.com/hashicorp/go-version/pull/104)) + # 1.6.0 (June 28, 2022) FEATURES: diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE index c33dcc7c..1409d6ab 100644 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2014 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 4d250509..4b7806cd 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,5 @@ # Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) +![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) [![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index da5d1aca..29bdc4d2 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -1,8 +1,10 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package version import ( "fmt" - "reflect" "regexp" "sort" "strings" @@ -199,7 +201,7 @@ func prereleaseCheck(v, c *Version) bool { case cPre && vPre: // A constraint with a pre-release can only match a pre-release version // with the same base segments. - return reflect.DeepEqual(c.Segments64(), v.Segments64()) + return v.equalSegments(c) case !cPre && vPre: // A constraint without a pre-release can only match a version without a diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index e87df699..7c683c28 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package version import ( "bytes" + "database/sql/driver" "fmt" - "reflect" "regexp" "strconv" "strings" @@ -117,11 +120,8 @@ func (v *Version) Compare(other *Version) int { return 0 } - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { + if v.equalSegments(other) { preSelf := v.Prerelease() preOther := other.Prerelease() if preSelf == "" && preOther == "" { @@ -137,6 +137,8 @@ func (v *Version) Compare(other *Version) int { return comparePrereleases(preSelf, preOther) } + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() // Get the highest specificity (hS), or if they're equal, just use segmentSelf length lenSelf := len(segmentsSelf) lenOther := len(segmentsOther) @@ -160,7 +162,7 @@ func (v *Version) Compare(other *Version) int { // this means Other had the lower specificity // Check to see if the remaining segments in Self are all zeros - if !allZero(segmentsSelf[i:]) { - //if not, it means that Self has to be greater than Other + // if not, it means that Self has to be greater than Other return 1 } break @@ -180,6 +182,21 @@ func (v *Version) Compare(other *Version) int { return 0 } +func (v *Version) equalSegments(other *Version) bool { + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + if len(segmentsSelf) != len(segmentsOther) { + return false + } + for i, v := range segmentsSelf { + if v != segmentsOther[i] { + return false + } + } + return true +} + func allZero(segs []int64) bool { for _, s := range segs { if s != 0 { @@ -405,3 +422,20 @@ func (v *Version) UnmarshalText(b []byte) error { func (v *Version) MarshalText() ([]byte, error) { return []byte(v.String()), nil } + +// Scan implements the sql.Scanner interface. +func (v *Version) Scan(src interface{}) error { + switch src := src.(type) { + case string: + return v.UnmarshalText([]byte(src)) + case nil: + return nil + default: + return fmt.Errorf("cannot scan %T as Version", src) + } +} + +// Value implements the driver.Valuer interface. +func (v *Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go index cc888d43..83547fe1 100644 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package version // Collection is a type that implements the sort.Interface interface diff --git a/vendor/github.com/hashicorp/hc-install/.go-version b/vendor/github.com/hashicorp/hc-install/.go-version index ce2dd535..2a0ba77c 100644 --- a/vendor/github.com/hashicorp/hc-install/.go-version +++ b/vendor/github.com/hashicorp/hc-install/.go-version @@ -1 +1 @@ -1.21.5 +1.22.4 diff --git a/vendor/github.com/hashicorp/hc-install/README.md b/vendor/github.com/hashicorp/hc-install/README.md index 6e78b5a6..0d55191b 100644 --- a/vendor/github.com/hashicorp/hc-install/README.md +++ b/vendor/github.com/hashicorp/hc-install/README.md @@ -14,55 +14,55 @@ the library in ad-hoc or CI shell scripting outside of Go. `hc-install` does **not**: - - Determine suitable installation path based on target system. e.g. in `/usr/bin` or `/usr/local/bin` on Unix based system. - - Deal with execution of installed binaries (via service files or otherwise). - - Upgrade existing binaries on your system. - - Add nor link downloaded binaries to your `$PATH`. +- Determine suitable installation path based on target system. e.g. in `/usr/bin` or `/usr/local/bin` on Unix based system. +- Deal with execution of installed binaries (via service files or otherwise). +- Upgrade existing binaries on your system. +- Add nor link downloaded binaries to your `$PATH`. ## API The `Installer` offers a few high-level methods: - - `Ensure(context.Context, []src.Source)` to find, install, or build a product version - - `Install(context.Context, []src.Installable)` to install a product version +- `Ensure(context.Context, []src.Source)` to find, install, or build a product version +- `Install(context.Context, []src.Installable)` to install a product version ### Sources The `Installer` methods accept number of different `Source` types. Each comes with different trade-offs described below. - - `fs.{AnyVersion,ExactVersion,Version}` - Finds a binary in `$PATH` (or additional paths) - - **Pros:** - - This is most convenient when you already have the product installed on your system +- `fs.{AnyVersion,ExactVersion,Version}` - Finds a binary in `$PATH` (or additional paths) + - **Pros:** + - This is most convenient when you already have the product installed on your system which you already manage. - - **Cons:** - - Only relies on a single version, expects _you_ to manage the installation - - _Not recommended_ for any environment where product installation is not controlled or managed by you (e.g. 
default GitHub Actions image managed by GitHub) - - `releases.{LatestVersion,ExactVersion}` - Downloads, verifies & installs any known product from `releases.hashicorp.com` - - **Pros:** - - Fast and reliable way of obtaining any pre-built version of any product - - Allows installation of enterprise versions - - **Cons:** - - Installation may consume some bandwidth, disk space and a little time - - Potentially less stable builds (see `checkpoint` below) - - `checkpoint.LatestVersion` - Downloads, verifies & installs any known product available in HashiCorp Checkpoint - - **Pros:** - - Checkpoint typically contains only product versions considered stable - - **Cons:** - - Installation may consume some bandwidth, disk space and a little time - - Currently doesn't allow installation of old versions or enterprise versions (see `releases` above) - - `build.GitRevision` - Clones raw source code and builds the product from it - - **Pros:** - - Useful for catching bugs and incompatibilities as early as possible (prior to product release). - - **Cons:** - - Building from scratch can consume significant amount of time & resources (CPU, memory, bandwith, disk space) - - There are no guarantees that build instructions will always be up-to-date - - There's increased likelihood of build containing bugs prior to release - - Any CI builds relying on this are likely to be fragile + - **Cons:** + - Only relies on a single version, expects _you_ to manage the installation + - _Not recommended_ for any environment where product installation is not controlled or managed by you (e.g. default GitHub Actions image managed by GitHub) +- `releases.{LatestVersion,ExactVersion}` - Downloads, verifies & installs any known product from `releases.hashicorp.com` + - **Pros:** + - Fast and reliable way of obtaining any pre-built version of any product + - Allows installation of enterprise versions + - **Cons:** + - Installation may consume some bandwidth, disk space and a little time + - Potentially less stable builds (see `checkpoint` below) +- `checkpoint.LatestVersion` - Downloads, verifies & installs any known product available in HashiCorp Checkpoint + - **Pros:** + - Checkpoint typically contains only product versions considered stable + - **Cons:** + - Installation may consume some bandwidth, disk space and a little time + - Currently doesn't allow installation of old versions or enterprise versions (see `releases` above) +- `build.GitRevision` - Clones raw source code and builds the product from it + - **Pros:** + - Useful for catching bugs and incompatibilities as early as possible (prior to product release). + - **Cons:** + - Building from scratch can consume significant amount of time & resources (CPU, memory, bandwidth, disk space) + - There are no guarantees that build instructions will always be up-to-date + - There's increased likelihood of build containing bugs prior to release + - Any CI builds relying on this are likely to be fragile ## Example Usage -See examples at https://pkg.go.dev/github.com/hashicorp/hc-install#example-Installer. +See examples at . 
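+
+A minimal sketch (editor's addition, not part of the upstream README; it
+assumes the `product.Terraform` definition that ships with `hc-install`):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/go-version"
+	install "github.com/hashicorp/hc-install"
+	"github.com/hashicorp/hc-install/product"
+	"github.com/hashicorp/hc-install/releases"
+	"github.com/hashicorp/hc-install/src"
+)
+
+func main() {
+	installer := install.NewInstaller()
+	// product.Terraform is assumed to exist in hc-install's product package
+	execPath, err := installer.Ensure(context.Background(), []src.Source{
+		&releases.ExactVersion{
+			Product: product.Terraform,
+			Version: version.Must(version.NewVersion("1.3.7")),
+		},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("installed terraform to", execPath)
+}
+```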
## CLI @@ -70,9 +70,9 @@ In addition to the Go library, which is the intended primary use case of `hc-ins The CLI comes with some trade-offs: - - more limited interface compared to the flexible Go API (installs specific versions of products via `releases.ExactVersion`) - - minimal environment pre-requisites (no need to compile Go code) - - see ["hc-install is not a package manager"](https://github.com/hashicorp/hc-install#hc-install-is-not-a-package-manager) +- more limited interface compared to the flexible Go API (installs specific versions of products via `releases.ExactVersion`) +- minimal environment pre-requisites (no need to compile Go code) +- see ["hc-install is not a package manager"](https://github.com/hashicorp/hc-install#hc-install-is-not-a-package-manager) ### Installation @@ -82,7 +82,7 @@ Given that one of the key roles of the CLI/library is integrity checking, you sh [Homebrew](https://brew.sh) -``` +```sh brew install hashicorp/tap/hc-install ``` @@ -102,19 +102,23 @@ You can follow the instructions in the [Official Packaging Guide](https://www.ha ### Usage -``` +```text Usage: hc-install install [options] -version This command installs a HashiCorp product. Options: -version [REQUIRED] Version of product to install. - -path Path to directory where the product will be installed. Defaults - to current working directory. + -path Path to directory where the product will be installed. + Defaults to current working directory. + -log-file Path to file where logs will be written. /dev/stdout + or /dev/stderr can be used to log to STDOUT/STDERR. ``` + ```sh hc-install install -version 1.3.7 terraform ``` -``` + +```sh hc-install: will install terraform@1.3.7 installed terraform@1.3.7 to /current/working/dir/terraform ``` diff --git a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go index 2cd5379f..a382cb10 100644 --- a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go +++ b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go @@ -6,7 +6,7 @@ package checkpoint import ( "context" "fmt" - "io/ioutil" + "io" "log" "os" "path/filepath" @@ -24,7 +24,7 @@ import ( var ( defaultTimeout = 30 * time.Second - discardLogger = log.New(ioutil.Discard, "", 0) + discardLogger = log.New(io.Discard, "", 0) ) // LatestVersion installs the latest version known to Checkpoint @@ -35,6 +35,10 @@ type LatestVersion struct { SkipChecksumVerification bool InstallDir string + // LicenseDir represents directory path where to install license files. + // If empty, license files will placed in the same directory as the binary. 
+ LicenseDir string + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use // instead of built-in pubkey to verify signature of downloaded checksums ArmoredPublicKey string @@ -101,7 +105,7 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { if dstDir == "" { var err error dirName := fmt.Sprintf("%s_*", lv.Product.Name) - dstDir, err = ioutil.TempDir("", dirName) + dstDir, err = os.MkdirTemp("", dirName) if err != nil { return "", err } @@ -126,9 +130,11 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { if lv.ArmoredPublicKey != "" { d.ArmoredPublicKey = lv.ArmoredPublicKey } - zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir, "") - if zipFilePath != "" { - lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath) + + licenseDir := lv.LicenseDir + up, err := d.DownloadAndUnpack(ctx, pv, dstDir, licenseDir) + if up != nil { + lv.pathsToRemove = append(lv.pathsToRemove, up.PathsToRemove...) } if err != nil { return "", err diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs.go b/vendor/github.com/hashicorp/hc-install/fs/fs.go index 216df2c2..ac6f5cf9 100644 --- a/vendor/github.com/hashicorp/hc-install/fs/fs.go +++ b/vendor/github.com/hashicorp/hc-install/fs/fs.go @@ -4,14 +4,14 @@ package fs import ( - "io/ioutil" + "io" "log" "time" ) var ( defaultTimeout = 10 * time.Second - discardLogger = log.New(ioutil.Discard, "", 0) + discardLogger = log.New(io.Discard, "", 0) ) type fileCheckFunc func(path string) error diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go index eebd98b8..5aed8444 100644 --- a/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go +++ b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go @@ -16,9 +16,7 @@ import ( func lookupDirs(extraDirs []string) []string { pathVar := os.Getenv("PATH") dirs := filepath.SplitList(pathVar) - for _, ep := range extraDirs { - dirs = append(dirs, ep) - } + dirs = append(dirs, extraDirs...) 
return dirs } diff --git a/vendor/github.com/hashicorp/hc-install/installer.go b/vendor/github.com/hashicorp/hc-install/installer.go index 6c704eed..01c1fdee 100644 --- a/vendor/github.com/hashicorp/hc-install/installer.go +++ b/vendor/github.com/hashicorp/hc-install/installer.go @@ -6,7 +6,7 @@ package install import ( "context" "fmt" - "io/ioutil" + "io" "log" "github.com/hashicorp/go-multierror" @@ -23,7 +23,7 @@ type Installer struct { type RemoveFunc func(ctx context.Context) error func NewInstaller() *Installer { - discardLogger := log.New(ioutil.Discard, "", 0) + discardLogger := log.New(io.Discard, "", 0) return &Installer{ logger: discardLogger, } diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go index 504bf45a..6eef755b 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go +++ b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "log" "os" "os/exec" @@ -17,7 +17,7 @@ import ( "golang.org/x/mod/modfile" ) -var discardLogger = log.New(ioutil.Discard, "", 0) +var discardLogger = log.New(io.Discard, "", 0) // GoBuild represents a Go builder (to run "go build") type GoBuild struct { @@ -161,7 +161,7 @@ type CleanupFunc func(context.Context) func guessRequiredGoVersion(repoDir string) (*version.Version, bool) { goEnvFile := filepath.Join(repoDir, ".go-version") if fi, err := os.Stat(goEnvFile); err == nil && !fi.IsDir() { - b, err := ioutil.ReadFile(goEnvFile) + b, err := os.ReadFile(goEnvFile) if err != nil { return nil, false } @@ -174,7 +174,7 @@ func guessRequiredGoVersion(repoDir string) (*version.Version, bool) { goModFile := filepath.Join(repoDir, "go.mod") if fi, err := os.Stat(goModFile); err == nil && !fi.IsDir() { - b, err := ioutil.ReadFile(goModFile) + b, err := os.ReadFile(goModFile) if err != nil { return nil, false } diff --git a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go index a9503dfd..30c269b7 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go +++ b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go @@ -7,23 +7,18 @@ import ( "fmt" "net/http" - "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/hc-install/version" ) // NewHTTPClient provides a pre-configured http.Client // e.g. 
with relevant User-Agent header func NewHTTPClient() *http.Client { - client := cleanhttp.DefaultClient() - - userAgent := fmt.Sprintf("hc-install/%s", version.Version()) - - cli := cleanhttp.DefaultPooledClient() - cli.Transport = &userAgentRoundTripper{ - userAgent: userAgent, - inner: cli.Transport, + client := retryablehttp.NewClient().StandardClient() + client.Transport = &userAgentRoundTripper{ + userAgent: fmt.Sprintf("hc-install/%s", version.Version()), + inner: client.Transport, } - return client } diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go index 843de8cd..59dd1a1f 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go @@ -55,7 +55,7 @@ func (cd *ChecksumDownloader) DownloadAndVerifyChecksums(ctx context.Context) (C client := httpclient.NewHTTPClient() sigURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL, url.PathEscape(cd.ProductVersion.Name), - url.PathEscape(cd.ProductVersion.RawVersion), + url.PathEscape(cd.ProductVersion.Version.String()), url.PathEscape(sigFilename)) cd.Logger.Printf("downloading signature from %s", sigURL) @@ -76,7 +76,7 @@ func (cd *ChecksumDownloader) DownloadAndVerifyChecksums(ctx context.Context) (C shasumsURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL, url.PathEscape(cd.ProductVersion.Name), - url.PathEscape(cd.ProductVersion.RawVersion), + url.PathEscape(cd.ProductVersion.Version.String()), url.PathEscape(cd.ProductVersion.SHASUMS)) cd.Logger.Printf("downloading checksums from %s", shasumsURL) diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go index 146c1cf0..2f937f10 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go @@ -10,7 +10,6 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "log" "net/http" "net/url" @@ -29,14 +28,18 @@ type Downloader struct { BaseURL string } -func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, binDir string, licenseDir string) (zipFilePath string, err error) { +type UnpackedProduct struct { + PathsToRemove []string +} + +func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, binDir string, licenseDir string) (up *UnpackedProduct, err error) { if len(pv.Builds) == 0 { - return "", fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version) + return nil, fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version) } pb, ok := pv.Builds.FilterBuild(runtime.GOOS, runtime.GOARCH, "zip") if !ok { - return "", fmt.Errorf("no ZIP archive found for %s %s %s/%s", + return nil, fmt.Errorf("no ZIP archive found for %s %s %s/%s", pv.Name, pv.Version, runtime.GOOS, runtime.GOARCH) } @@ -50,48 +53,35 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, } verifiedChecksums, err := v.DownloadAndVerifyChecksums(ctx) if err != nil { - return "", err + return nil, err } var ok bool verifiedChecksum, ok = verifiedChecksums[pb.Filename] if !ok { - return "", fmt.Errorf("no checksum found for %q", pb.Filename) + return nil, fmt.Errorf("no checksum found for %q", pb.Filename) } } client := httpclient.NewHTTPClient() - archiveURL := pb.URL - if d.BaseURL != "" { - // ensure 
that absolute download links from mocked responses - // are still pointing to the mock server if one is set - baseURL, err := url.Parse(d.BaseURL) - if err != nil { - return "", err - } - - u, err := url.Parse(archiveURL) - if err != nil { - return "", err - } - u.Scheme = baseURL.Scheme - u.Host = baseURL.Host - archiveURL = u.String() + archiveURL, err := determineArchiveURL(pb.URL, d.BaseURL) + if err != nil { + return nil, err } d.Logger.Printf("downloading archive from %s", archiveURL) req, err := http.NewRequestWithContext(ctx, http.MethodGet, archiveURL, nil) if err != nil { - return "", fmt.Errorf("failed to create request for %q: %w", archiveURL, err) + return nil, fmt.Errorf("failed to create request for %q: %w", archiveURL, err) } resp, err := client.Do(req) if err != nil { - return "", err + return nil, err } if resp.StatusCode != 200 { - return "", fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status) + return nil, fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status) } defer resp.Body.Close() @@ -100,19 +90,22 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, contentType := resp.Header.Get("content-type") if !contentTypeIsZip(contentType) { - return "", fmt.Errorf("unexpected content-type: %s (expected any of %q)", + return nil, fmt.Errorf("unexpected content-type: %s (expected any of %q)", contentType, zipMimeTypes) } expectedSize := resp.ContentLength - pkgFile, err := ioutil.TempFile("", pb.Filename) + pkgFile, err := os.CreateTemp("", pb.Filename) if err != nil { - return "", err + return nil, err } defer pkgFile.Close() pkgFilePath, err := filepath.Abs(pkgFile.Name()) + up = &UnpackedProduct{} + up.PathsToRemove = append(up.PathsToRemove, pkgFilePath) + d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name()) var bytesCopied int64 @@ -123,12 +116,12 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, bytesCopied, err = io.Copy(h, r) if err != nil { - return "", err + return nil, err } calculatedSum := h.Sum(nil) if !bytes.Equal(calculatedSum, verifiedChecksum) { - return pkgFilePath, fmt.Errorf( + return up, fmt.Errorf( "checksum mismatch (expected: %x, got: %x)", verifiedChecksum, calculatedSum, ) @@ -136,14 +129,14 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, } else { bytesCopied, err = io.Copy(pkgFile, pkgReader) if err != nil { - return pkgFilePath, err + return up, err } } d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name()) if expectedSize != 0 && bytesCopied != int64(expectedSize) { - return pkgFilePath, fmt.Errorf( + return up, fmt.Errorf( "unexpected size (downloaded: %d, expected: %d)", bytesCopied, expectedSize, ) @@ -151,7 +144,7 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, r, err := zip.OpenReader(pkgFile.Name()) if err != nil { - return pkgFilePath, err + return up, err } defer r.Close() @@ -163,31 +156,37 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, } srcFile, err := f.Open() if err != nil { - return pkgFilePath, err + return up, err } // Determine the appropriate destination file path dstDir := binDir + // for license files, use binDir if licenseDir is not set if isLicenseFile(f.Name) && licenseDir != "" { dstDir = licenseDir } d.Logger.Printf("unpacking %s to %s", f.Name, dstDir) dstPath := filepath.Join(dstDir, f.Name) + + if isLicenseFile(f.Name) { + up.PathsToRemove = 
append(up.PathsToRemove, dstPath) + } + dstFile, err := os.Create(dstPath) if err != nil { - return pkgFilePath, err + return up, err } _, err = io.Copy(dstFile, srcFile) if err != nil { - return pkgFilePath, err + return up, err } srcFile.Close() dstFile.Close() } - return pkgFilePath, nil + return up, nil } // The production release site uses consistent single mime type @@ -207,11 +206,13 @@ func contentTypeIsZip(contentType string) bool { return false } -// Enterprise products have a few additional license files -// that need to be extracted to a separate directory +// Product archives may have a few license files +// which may be extracted to a separate directory +// and may need to be tracked for later cleanup. var licenseFiles = []string{ "EULA.txt", "TermsOfEvaluation.txt", + "LICENSE.txt", } func isLicenseFile(filename string) bool { @@ -222,3 +223,28 @@ func isLicenseFile(filename string) bool { } return false } + +// determineArchiveURL determines the archive URL based on the base URL provided. +func determineArchiveURL(archiveURL, baseURL string) (string, error) { + // If custom URL is set, use that instead of the one from the JSON. + // Also ensures that absolute download links from mocked responses + // are still pointing to the mock server if one is set. + if baseURL == "" { + return archiveURL, nil + } + + base, err := url.Parse(baseURL) + if err != nil { + return "", err + } + + u, err := url.Parse(archiveURL) + if err != nil { + return "", err + } + + // Use base URL path and append the path from the archive URL. + newArchiveURL := base.JoinPath(u.Path) + + return newArchiveURL.String(), nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go index 99b811a6..94152b13 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go @@ -9,8 +9,7 @@ import "github.com/hashicorp/go-version" // "consul 0.5.1". A ProductVersion may have one or more builds. 
type ProductVersion struct { Name string `json:"name"` - RawVersion string `json:"version"` - Version *version.Version `json:"-"` + Version *version.Version `json:"version"` SHASUMS string `json:"shasums,omitempty"` SHASUMSSig string `json:"shasums_signature,omitempty"` SHASUMSSigs []string `json:"shasums_signatures,omitempty"` diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go index 755019f2..4c0bab00 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go @@ -7,7 +7,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -55,7 +55,7 @@ type Releases struct { func NewReleases() *Releases { return &Releases{ - logger: log.New(ioutil.Discard, "", 0), + logger: log.New(io.Discard, "", 0), BaseURL: defaultBaseURL, } } @@ -95,7 +95,7 @@ func (r *Releases) ListProductVersions(ctx context.Context, productName string) r.logger.Printf("received %s", resp.Status) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -153,7 +153,7 @@ func (r *Releases) GetProductVersion(ctx context.Context, product string, versio r.logger.Printf("received %s", resp.Status) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go index 9789d7c3..03b03260 100644 --- a/vendor/github.com/hashicorp/hc-install/product/consul.go +++ b/vendor/github.com/hashicorp/hc-install/product/consul.go @@ -17,10 +17,6 @@ import ( var consulVersionOutputRe = regexp.MustCompile(`Consul ` + simpleVersionRe) -var ( - v1_18 = version.Must(version.NewVersion("1.18")) -) - var Consul = Product{ Name: "consul", BinaryName: func() string { diff --git a/vendor/github.com/hashicorp/hc-install/releases/enterprise.go b/vendor/github.com/hashicorp/hc-install/releases/enterprise.go index 179d40d1..cfef088a 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/enterprise.go +++ b/vendor/github.com/hashicorp/hc-install/releases/enterprise.go @@ -6,9 +6,6 @@ package releases import "fmt" type EnterpriseOptions struct { - // LicenseDir represents directory path where to install license files (required) - LicenseDir string - // Meta represents optional version metadata (e.g. 
hsm, fips1402) Meta string } @@ -25,12 +22,12 @@ func enterpriseVersionMetadata(eo *EnterpriseOptions) string { return metadata } -func validateEnterpriseOptions(eo *EnterpriseOptions) error { +func validateEnterpriseOptions(eo *EnterpriseOptions, licenseDir string) error { if eo == nil { return nil } - if eo.LicenseDir == "" { + if licenseDir == "" { return fmt.Errorf("LicenseDir must be provided when requesting enterprise versions") } diff --git a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go index e42f4d23..597e9ae3 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go +++ b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go @@ -6,7 +6,6 @@ package releases import ( "context" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -28,6 +27,10 @@ type ExactVersion struct { InstallDir string Timeout time.Duration + // LicenseDir represents directory path where to install license files + // (required for enterprise versions, optional for Community editions). + LicenseDir string + // Enterprise indicates installation of enterprise version (leave nil for Community editions) Enterprise *EnterpriseOptions @@ -37,7 +40,10 @@ type ExactVersion struct { // instead of built-in pubkey to verify signature of downloaded checksums ArmoredPublicKey string - apiBaseURL string + // ApiBaseURL is an optional field that specifies a custom URL to download the product from. + // If ApiBaseURL is set, the product will be downloaded from this base URL instead of the default site. + // Note: The directory structure of the custom URL must match the HashiCorp releases site (including the index.json files). + ApiBaseURL string logger *log.Logger pathsToRemove []string } @@ -70,7 +76,7 @@ func (ev *ExactVersion) Validate() error { return fmt.Errorf("unknown version") } - if err := validateEnterpriseOptions(ev.Enterprise); err != nil { + if err := validateEnterpriseOptions(ev.Enterprise, ev.LicenseDir); err != nil { return err } @@ -93,7 +99,7 @@ func (ev *ExactVersion) Install(ctx context.Context) (string, error) { if dstDir == "" { var err error dirName := fmt.Sprintf("%s_*", ev.Product.Name) - dstDir, err = ioutil.TempDir("", dirName) + dstDir, err = os.MkdirTemp("", dirName) if err != nil { return "", err } @@ -103,8 +109,8 @@ func (ev *ExactVersion) Install(ctx context.Context) (string, error) { ev.log().Printf("will install into dir at %s", dstDir) rels := rjson.NewReleases() - if ev.apiBaseURL != "" { - rels.BaseURL = ev.apiBaseURL + if ev.ApiBaseURL != "" { + rels.BaseURL = ev.ApiBaseURL } rels.SetLogger(ev.log()) installVersion := ev.Version @@ -125,17 +131,14 @@ func (ev *ExactVersion) Install(ctx context.Context) (string, error) { if ev.ArmoredPublicKey != "" { d.ArmoredPublicKey = ev.ArmoredPublicKey } - if ev.apiBaseURL != "" { - d.BaseURL = ev.apiBaseURL + if ev.ApiBaseURL != "" { + d.BaseURL = ev.ApiBaseURL } - licenseDir := "" - if ev.Enterprise != nil { - licenseDir = ev.Enterprise.LicenseDir - } - zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir, licenseDir) - if zipFilePath != "" { - ev.pathsToRemove = append(ev.pathsToRemove, zipFilePath) + licenseDir := ev.LicenseDir + up, err := d.DownloadAndUnpack(ctx, pv, dstDir, licenseDir) + if up != nil { + ev.pathsToRemove = append(ev.pathsToRemove, up.PathsToRemove...) 
} if err != nil { return "", err diff --git a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go index 9893b223..ee70782b 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go +++ b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go @@ -6,7 +6,6 @@ package releases import ( "context" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -28,6 +27,10 @@ type LatestVersion struct { Timeout time.Duration IncludePrereleases bool + // LicenseDir represents directory path where to install license files + // (required for enterprise versions, optional for Community editions). + LicenseDir string + // Enterprise indicates installation of enterprise version (leave nil for Community editions) Enterprise *EnterpriseOptions @@ -37,7 +40,10 @@ type LatestVersion struct { // instead of built-in pubkey to verify signature of downloaded checksums ArmoredPublicKey string - apiBaseURL string + // ApiBaseURL is an optional field that specifies a custom URL to download the product from. + // If ApiBaseURL is set, the product will be downloaded from this base URL instead of the default site. + // Note: The directory structure of the custom URL must match the HashiCorp releases site (including the index.json files). + ApiBaseURL string logger *log.Logger pathsToRemove []string } @@ -66,7 +72,7 @@ func (lv *LatestVersion) Validate() error { return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName()) } - if err := validateEnterpriseOptions(lv.Enterprise); err != nil { + if err := validateEnterpriseOptions(lv.Enterprise, lv.LicenseDir); err != nil { return err } @@ -89,7 +95,7 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { if dstDir == "" { var err error dirName := fmt.Sprintf("%s_*", lv.Product.Name) - dstDir, err = ioutil.TempDir("", dirName) + dstDir, err = os.MkdirTemp("", dirName) if err != nil { return "", err } @@ -99,8 +105,8 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { lv.log().Printf("will install into dir at %s", dstDir) rels := rjson.NewReleases() - if lv.apiBaseURL != "" { - rels.BaseURL = lv.apiBaseURL + if lv.ApiBaseURL != "" { + rels.BaseURL = lv.ApiBaseURL } rels.SetLogger(lv.log()) versions, err := rels.ListProductVersions(ctx, lv.Product.Name) @@ -126,16 +132,13 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { if lv.ArmoredPublicKey != "" { d.ArmoredPublicKey = lv.ArmoredPublicKey } - if lv.apiBaseURL != "" { - d.BaseURL = lv.apiBaseURL - } - licenseDir := "" - if lv.Enterprise != nil { - licenseDir = lv.Enterprise.LicenseDir + if lv.ApiBaseURL != "" { + d.BaseURL = lv.ApiBaseURL } - zipFilePath, err := d.DownloadAndUnpack(ctx, versionToInstall, dstDir, licenseDir) - if zipFilePath != "" { - lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath) + licenseDir := lv.LicenseDir + up, err := d.DownloadAndUnpack(ctx, versionToInstall, dstDir, licenseDir) + if up != nil { + lv.pathsToRemove = append(lv.pathsToRemove, up.PathsToRemove...) 
} if err != nil { return "", err diff --git a/vendor/github.com/hashicorp/hc-install/releases/releases.go b/vendor/github.com/hashicorp/hc-install/releases/releases.go index 7bef49ba..a24db6c6 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/releases.go +++ b/vendor/github.com/hashicorp/hc-install/releases/releases.go @@ -4,7 +4,7 @@ package releases import ( - "io/ioutil" + "io" "log" "time" ) @@ -12,5 +12,5 @@ import ( var ( defaultInstallTimeout = 30 * time.Second defaultListTimeout = 10 * time.Second - discardLogger = log.New(ioutil.Discard, "", 0) + discardLogger = log.New(io.Discard, "", 0) ) diff --git a/vendor/github.com/hashicorp/hc-install/releases/versions.go b/vendor/github.com/hashicorp/hc-install/releases/versions.go index 49b1af78..a4316090 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/versions.go +++ b/vendor/github.com/hashicorp/hc-install/releases/versions.go @@ -30,8 +30,9 @@ type Versions struct { } type InstallationOptions struct { - Timeout time.Duration - Dir string + Timeout time.Duration + Dir string + LicenseDir string SkipChecksumVerification bool @@ -46,7 +47,7 @@ func (v *Versions) List(ctx context.Context) ([]src.Source, error) { return nil, fmt.Errorf("invalid product name: %q", v.Product.Name) } - if err := validateEnterpriseOptions(v.Enterprise); err != nil { + if err := validateEnterpriseOptions(v.Enterprise, v.Install.LicenseDir); err != nil { return nil, err } @@ -85,6 +86,7 @@ func (v *Versions) List(ctx context.Context) ([]src.Source, error) { Version: pv.Version, InstallDir: v.Install.Dir, Timeout: v.Install.Timeout, + LicenseDir: v.Install.LicenseDir, ArmoredPublicKey: v.Install.ArmoredPublicKey, SkipChecksumVerification: v.Install.SkipChecksumVerification, @@ -92,8 +94,7 @@ func (v *Versions) List(ctx context.Context) ([]src.Source, error) { if v.Enterprise != nil { ev.Enterprise = &EnterpriseOptions{ - Meta: v.Enterprise.Meta, - LicenseDir: v.Enterprise.LicenseDir, + Meta: v.Enterprise.Meta, } } diff --git a/vendor/github.com/hashicorp/hc-install/version/VERSION b/vendor/github.com/hashicorp/hc-install/version/VERSION index d2b13eb6..a3df0a69 100644 --- a/vendor/github.com/hashicorp/hc-install/version/VERSION +++ b/vendor/github.com/hashicorp/hc-install/version/VERSION @@ -1 +1 @@ -0.6.4 +0.8.0 diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index 2eebedbc..f81110dc 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,16 @@ # HCL Changelog +## v2.21.0 (June 19, 2024) + +### Enhancements + +* Introduce `ParseTraversalPartial`, which allows traversals that include the splat (`[*]`) index operator. ([#673](https://github.com/hashicorp/hcl/pull/673)) +* ext/dynblock: Now accepts marked values in `for_each`, and will transfer those marks (as much as technically possible) to values in the generated blocks. ([#679](https://github.com/hashicorp/hcl/pull/679)) + +### Bugs Fixed + +* Expression evaluation will no longer panic if the splat operator is applied to an unknown value that has cty marks. 
([#678](https://github.com/hashicorp/hcl/pull/678)) + ## v2.20.1 (March 26, 2024) ### Bugs Fixed diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 81597399..577a50fa 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -1780,7 +1780,7 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if sourceVal.IsNull() { if autoUpgrade { - return cty.EmptyTupleVal, diags + return cty.EmptyTupleVal.WithSameMarks(sourceVal), diags } diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1798,7 +1798,7 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { // If we don't even know the _type_ of our source value yet then // we'll need to defer all processing, since we can't decide our // result type either. - return cty.DynamicVal, diags + return cty.DynamicVal.WithSameMarks(sourceVal), diags } upgradedUnknown := false @@ -1813,13 +1813,14 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { // list of a single attribute, but we still need to check if that // attribute actually exists. if !sourceVal.IsKnown() { - sourceRng := sourceVal.Range() + unmarkedVal, _ := sourceVal.Unmark() + sourceRng := unmarkedVal.Range() if sourceRng.CouldBeNull() { upgradedUnknown = true } } - sourceVal = cty.TupleVal([]cty.Value{sourceVal}) + sourceVal = cty.TupleVal([]cty.Value{sourceVal}).WithSameMarks(sourceVal) sourceTy = sourceVal.Type() } @@ -1900,14 +1901,14 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { e.Item.clearValue(ctx) // clean up our temporary value if upgradedUnknown { - return cty.DynamicVal, diags + return cty.DynamicVal.WithMarks(marks), diags } if !isKnown { // We'll ignore the resultTy diagnostics in this case since they // will just be the same errors we saw while iterating above. ty, _ := resultTy() - return cty.UnknownVal(ty), diags + return cty.UnknownVal(ty).WithMarks(marks), diags } switch { @@ -1915,7 +1916,7 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if len(vals) == 0 { ty, tyDiags := resultTy() diags = append(diags, tyDiags...) - return cty.ListValEmpty(ty.ElementType()), diags + return cty.ListValEmpty(ty.ElementType()).WithMarks(marks), diags } return cty.ListVal(vals).WithMarks(marks), diags default: diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go index 3afa6ab0..f7d4062f 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go @@ -4,8 +4,9 @@ package hclsyntax import ( - "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl/v2" ) // ParseTraversalAbs parses an absolute traversal that is assumed to consume @@ -13,6 +14,26 @@ import ( // behavior is not supported here because traversals are not expected to // be parsed as part of a larger program. func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + return p.parseTraversal(false) +} + +// ParseTraversalPartial parses an absolute traversal that is permitted +// to contain splat ([*]) expressions. Only splat expressions within square +// brackets are permitted ([*]); splat expressions within attribute names are +// not permitted (.*).
+// +// The meaning of partial here is that the traversal may be incomplete, in that +// any splat expression indicates reference to a potentially unknown number of +// elements. +// +// Traversals that include splats cannot be automatically traversed by HCL using +// the TraversalAbs or TraversalRel methods. Instead, the caller must handle +// the traversals manually. +func (p *parser) ParseTraversalPartial() (hcl.Traversal, hcl.Diagnostics) { + return p.parseTraversal(true) +} + +func (p *parser) parseTraversal(allowSplats bool) (hcl.Traversal, hcl.Diagnostics) { var ret hcl.Traversal var diags hcl.Diagnostics @@ -127,6 +148,34 @@ func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { return ret, diags } + case TokenStar: + if allowSplats { + + p.Read() // Eat the star. + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseSplat{ + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + continue + } + + // Otherwise, return the error below for the star. + fallthrough default: if next.Type == TokenStar { diags = append(diags, &hcl.Diagnostic{ diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go index d56f8e50..17dc1ed4 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go @@ -118,6 +118,37 @@ func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversa return expr, diags } +// ParseTraversalPartial matches the behavior of ParseTraversalAbs except +// that it allows splat expressions ([*]) to appear in the traversal. +// +// The returned traversals are "partial" in that the splat expression indicates +// an unknown value for the index. +// +// Traversals that include splats cannot be automatically traversed by HCL using +// the TraversalAbs or TraversalRel methods. Instead, the caller must handle +// the traversals manually. +func ParseTraversalPartial(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) { + tokens, diags := LexExpression(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + + // Bare traversals are always parsed in "ignore newlines" mode, as if + // they were wrapped in parentheses. + parser.PushIncludeNewlines(false) + + expr, parseDiags := parser.ParseTraversalPartial() + diags = append(diags, parseDiags...) + + parser.PopIncludeNewlines() + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + // LexConfig performs lexical analysis on the given buffer, treating it as a // whole HCL config file, and returns the resulting tokens. // diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/directory.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/directory.go new file mode 100644 index 00000000..3c3183b7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/directory.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package check + +import ( + "fmt" + "log" + "path/filepath" +) + +const ( + CdktfIndexDirectory = `cdktf` + + LegacyIndexDirectory = `website/docs` + LegacyDataSourcesDirectory = `d` + LegacyGuidesDirectory = `guides` + LegacyResourcesDirectory = `r` + LegacyFunctionsDirectory = `functions` + + RegistryIndexDirectory = `docs` + RegistryDataSourcesDirectory = `data-sources` + RegistryGuidesDirectory = `guides` + RegistryResourcesDirectory = `resources` + RegistryFunctionsDirectory = `functions` + + // Terraform Registry Storage Limits + // https://www.terraform.io/docs/registry/providers/docs.html#storage-limits + RegistryMaximumNumberOfFiles = 2000 + RegistryMaximumSizeOfFile = 500000 // 500KB + +) + +var ValidLegacyDirectories = []string{ + LegacyIndexDirectory, + LegacyIndexDirectory + "/" + LegacyDataSourcesDirectory, + LegacyIndexDirectory + "/" + LegacyGuidesDirectory, + LegacyIndexDirectory + "/" + LegacyResourcesDirectory, + LegacyIndexDirectory + "/" + LegacyFunctionsDirectory, +} + +var ValidRegistryDirectories = []string{ + RegistryIndexDirectory, + RegistryIndexDirectory + "/" + RegistryDataSourcesDirectory, + RegistryIndexDirectory + "/" + RegistryGuidesDirectory, + RegistryIndexDirectory + "/" + RegistryResourcesDirectory, + RegistryIndexDirectory + "/" + RegistryFunctionsDirectory, +} + +var ValidCdktfLanguages = []string{ + "csharp", + "go", + "java", + "python", + "typescript", +} + +var ValidLegacySubdirectories = []string{ + LegacyIndexDirectory, + LegacyDataSourcesDirectory, + LegacyGuidesDirectory, + LegacyResourcesDirectory, +} + +var ValidRegistrySubdirectories = []string{ + RegistryIndexDirectory, + RegistryDataSourcesDirectory, + RegistryGuidesDirectory, + RegistryResourcesDirectory, +} + +func InvalidDirectoriesCheck(dirPath string) error { + if IsValidRegistryDirectory(dirPath) { + return nil + } + + if IsValidLegacyDirectory(dirPath) { + return nil + } + + if IsValidCdktfDirectory(dirPath) { + return nil + } + + return fmt.Errorf("invalid Terraform Provider documentation directory found: %s", dirPath) + +} + +func MixedDirectoriesCheck(docFiles []string) error { + var legacyDirectoryFound bool + var registryDirectoryFound bool + err := fmt.Errorf("mixed Terraform Provider documentation directory layouts found, must use only legacy or registry layout") + + for _, file := range docFiles { + directory := filepath.Dir(file) + log.Printf("[DEBUG] Found directory: %s", directory) + + // Allow docs/ with other files + if IsValidRegistryDirectory(directory) && directory != RegistryIndexDirectory { + registryDirectoryFound = true + + if legacyDirectoryFound { + log.Printf("[DEBUG] Found mixed directories") + return err + } + } + + if IsValidLegacyDirectory(directory) { + legacyDirectoryFound = true + + if registryDirectoryFound { + log.Printf("[DEBUG] Found mixed directories") + return err + } + } + } + + return nil +} + +func IsValidLegacyDirectory(directory string) bool { + for _, validLegacyDirectory := range ValidLegacyDirectories { + if directory == filepath.FromSlash(validLegacyDirectory) { + return true + } + } + + return false +} + +func IsValidRegistryDirectory(directory string) bool { + for _, validRegistryDirectory := range ValidRegistryDirectories { + if directory == filepath.FromSlash(validRegistryDirectory) { + return true + } + } + + return false +} + +func IsValidCdktfDirectory(directory string) bool { + if directory == filepath.FromSlash(fmt.Sprintf("%s/%s", LegacyIndexDirectory, CdktfIndexDirectory)) { + return 
true + } + + if directory == filepath.FromSlash(fmt.Sprintf("%s/%s", RegistryIndexDirectory, CdktfIndexDirectory)) { + return true + } + + for _, validCdktfLanguage := range ValidCdktfLanguages { + + if directory == filepath.FromSlash(fmt.Sprintf("%s/%s/%s", LegacyIndexDirectory, CdktfIndexDirectory, validCdktfLanguage)) { + return true + } + + if directory == filepath.FromSlash(fmt.Sprintf("%s/%s/%s", RegistryIndexDirectory, CdktfIndexDirectory, validCdktfLanguage)) { + return true + } + + for _, validLegacySubdirectory := range ValidLegacySubdirectories { + if directory == filepath.FromSlash(fmt.Sprintf("%s/%s/%s/%s", LegacyIndexDirectory, CdktfIndexDirectory, validCdktfLanguage, validLegacySubdirectory)) { + return true + } + } + + for _, validRegistrySubdirectory := range ValidRegistrySubdirectories { + if directory == filepath.FromSlash(fmt.Sprintf("%s/%s/%s/%s", RegistryIndexDirectory, CdktfIndexDirectory, validCdktfLanguage, validRegistrySubdirectory)) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file.go new file mode 100644 index 00000000..cb079b3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package check + +import ( + "fmt" + "log" + "os" + "path/filepath" +) + +type FileOptions struct { + BasePath string +} + +func (opts *FileOptions) FullPath(path string) string { + if opts.BasePath != "" { + return filepath.Join(opts.BasePath, path) + } + + return path +} + +// FileSizeCheck verifies that documentation file is below the Terraform Registry storage limit. +func FileSizeCheck(fullpath string) error { + fi, err := os.Stat(fullpath) + + if err != nil { + return err + } + + log.Printf("[DEBUG] File %s size: %d (limit: %d)", fullpath, fi.Size(), RegistryMaximumSizeOfFile) + if fi.Size() >= int64(RegistryMaximumSizeOfFile) { + return fmt.Errorf("exceeded maximum (%d) size of documentation file for Terraform Registry: %d", RegistryMaximumSizeOfFile, fi.Size()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_extension.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_extension.go new file mode 100644 index 00000000..dd5f37b6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_extension.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package check + +import ( + "fmt" + "path/filepath" + "strings" +) + +const ( + FileExtensionHtmlMarkdown = `.html.markdown` + FileExtensionHtmlMd = `.html.md` + FileExtensionMarkdown = `.markdown` + FileExtensionMd = `.md` +) + +var ValidLegacyFileExtensions = []string{ + FileExtensionHtmlMarkdown, + FileExtensionHtmlMd, + FileExtensionMarkdown, + FileExtensionMd, +} + +var ValidRegistryFileExtensions = []string{ + FileExtensionMd, +} + +// FileExtensionCheck checks if the file extension of the given path is valid. 
+func FileExtensionCheck(path string, validExtensions []string) error { + if !FilePathEndsWithExtensionFrom(path, validExtensions) { + return fmt.Errorf("file does not end with a valid extension, valid extensions: %v", ValidLegacyFileExtensions) + } + + return nil +} + +func FilePathEndsWithExtensionFrom(path string, validExtensions []string) bool { + for _, validExtension := range validExtensions { + if strings.HasSuffix(path, validExtension) { + return true + } + } + + return false +} + +// TrimFileExtension removes file extensions including those with multiple periods. +func TrimFileExtension(path string) string { + filename := filepath.Base(path) + + if filename == "." { + return "" + } + + dotIndex := strings.IndexByte(filename, '.') + + if dotIndex > 0 { + return filename[:dotIndex] + } + + return filename +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_mismatch.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_mismatch.go new file mode 100644 index 00000000..d65989fd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/file_mismatch.go @@ -0,0 +1,284 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package check + +import ( + "errors" + "fmt" + "log" + "os" + "sort" + + tfjson "github.com/hashicorp/terraform-json" +) + +type FileMismatchOptions struct { + *FileOptions + + IgnoreFileMismatch []string + + IgnoreFileMissing []string + + ProviderShortName string + + DatasourceEntries []os.DirEntry + + ResourceEntries []os.DirEntry + + FunctionEntries []os.DirEntry + + Schema *tfjson.ProviderSchema +} + +type FileMismatchCheck struct { + Options *FileMismatchOptions +} + +func NewFileMismatchCheck(opts *FileMismatchOptions) *FileMismatchCheck { + check := &FileMismatchCheck{ + Options: opts, + } + + if check.Options == nil { + check.Options = &FileMismatchOptions{} + } + + if check.Options.FileOptions == nil { + check.Options.FileOptions = &FileOptions{} + } + + return check +} + +func (check *FileMismatchCheck) Run() error { + var result error + + if check.Options.Schema == nil { + log.Printf("[DEBUG] Skipping file mismatch checks due to missing provider schema") + return nil + } + + if check.Options.ResourceEntries != nil { + err := check.ResourceFileMismatchCheck(check.Options.ResourceEntries, "resource", check.Options.Schema.ResourceSchemas) + result = errors.Join(result, err) + } + + if check.Options.DatasourceEntries != nil { + err := check.ResourceFileMismatchCheck(check.Options.DatasourceEntries, "datasource", check.Options.Schema.DataSourceSchemas) + result = errors.Join(result, err) + } + + if check.Options.FunctionEntries != nil { + err := check.FunctionFileMismatchCheck(check.Options.FunctionEntries, check.Options.Schema.Functions) + result = errors.Join(result, err) + } + + return result +} + +// ResourceFileMismatchCheck checks for mismatched files, either missing or extraneous, against the resource/datasource schema
file.Name()) + if fileHasResource(schemas, check.Options.ProviderShortName, file.Name()) { + continue + } + + if check.IgnoreFileMismatch(file.Name()) { + continue + } + + log.Printf("[DEBUG] Found extraneous file %s", file.Name()) + extraFiles = append(extraFiles, file.Name()) + } + + for _, resourceName := range resourceNames(schemas) { + log.Printf("[DEBUG] Found %s %s", resourceType, resourceName) + if resourceHasFile(files, check.Options.ProviderShortName, resourceName) { + continue + } + + if check.IgnoreFileMissing(resourceName) { + continue + } + + log.Printf("[DEBUG] Missing file for %s %s", resourceType, resourceName) + missingFiles = append(missingFiles, resourceName) + } + + var result error + + for _, extraFile := range extraFiles { + err := fmt.Errorf("matching %s for documentation file (%s) not found, file is extraneous or incorrectly named", resourceType, extraFile) + result = errors.Join(result, err) + } + + for _, missingFile := range missingFiles { + err := fmt.Errorf("missing documentation file for %s: %s", resourceType, missingFile) + result = errors.Join(result, err) + } + + return result + +} + +// FunctionFileMismatchCheck checks for mismatched files, either missing or extraneous, against the function signature +func (check *FileMismatchCheck) FunctionFileMismatchCheck(files []os.DirEntry, functions map[string]*tfjson.FunctionSignature) error { + if len(files) == 0 { + log.Printf("[DEBUG] Skipping function file mismatch checks due to missing file list") + return nil + } + + if len(functions) == 0 { + log.Printf("[DEBUG] Skipping function file mismatch checks due to missing schemas") + return nil + } + + var extraFiles []string + var missingFiles []string + + for _, file := range files { + if fileHasFunction(functions, file.Name()) { + continue + } + + if check.IgnoreFileMismatch(file.Name()) { + continue + } + + extraFiles = append(extraFiles, file.Name()) + } + + for _, functionName := range functionNames(functions) { + if functionHasFile(files, functionName) { + continue + } + + if check.IgnoreFileMissing(functionName) { + continue + } + + missingFiles = append(missingFiles, functionName) + } + + var result error + + for _, extraFile := range extraFiles { + err := fmt.Errorf("matching function for documentation file (%s) not found, file is extraneous or incorrectly named", extraFile) + result = errors.Join(result, err) + } + + for _, missingFile := range missingFiles { + err := fmt.Errorf("missing documentation file for function: %s", missingFile) + result = errors.Join(result, err) + } + + return result + +} + +func (check *FileMismatchCheck) IgnoreFileMismatch(file string) bool { + for _, ignoreResourceName := range check.Options.IgnoreFileMismatch { + if ignoreResourceName == fileResourceName(check.Options.ProviderShortName, file) { + return true + } + } + + return false +} + +func (check *FileMismatchCheck) IgnoreFileMissing(resourceName string) bool { + for _, ignoreResourceName := range check.Options.IgnoreFileMissing { + if ignoreResourceName == resourceName { + return true + } + } + + return false +} + +func fileHasResource(schemaResources map[string]*tfjson.Schema, providerName, file string) bool { + if _, ok := schemaResources[fileResourceName(providerName, file)]; ok { + return true + } + + return false +} + +func fileHasFunction(functions map[string]*tfjson.FunctionSignature, file string) bool { + if _, ok := functions[TrimFileExtension(file)]; ok { + return true + } + + return false +} + +func fileResourceName(providerName, fileName string) string { + 
resourceSuffix := TrimFileExtension(fileName) + + return fmt.Sprintf("%s_%s", providerName, resourceSuffix) +} + +func resourceHasFile(files []os.DirEntry, providerName, resourceName string) bool { + var found bool + + for _, file := range files { + if fileResourceName(providerName, file.Name()) == resourceName { + found = true + break + } + } + + return found +} + +func functionHasFile(files []os.DirEntry, functionName string) bool { + var found bool + + for _, file := range files { + if TrimFileExtension(file.Name()) == functionName { + found = true + break + } + } + + return found +} + +func resourceNames(resources map[string]*tfjson.Schema) []string { + names := make([]string, 0, len(resources)) + + for name := range resources { + names = append(names, name) + } + + sort.Strings(names) + + return names +} + +func functionNames(functions map[string]*tfjson.FunctionSignature) []string { + names := make([]string, 0, len(functions)) + + for name := range functions { + names = append(names, name) + } + + sort.Strings(names) + + return names +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/frontmatter.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/frontmatter.go new file mode 100644 index 00000000..65ac43aa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/frontmatter.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package check + +import ( + "bytes" + "fmt" + + "github.com/yuin/goldmark" + "github.com/yuin/goldmark/parser" + "go.abhg.dev/goldmark/frontmatter" +) + +type FrontMatterCheck struct { + Options *FrontMatterOptions +} + +// FrontMatterData represents the YAML frontmatter of Terraform Provider documentation. +type FrontMatterData struct { + Description *string `yaml:"description,omitempty"` + Layout *string `yaml:"layout,omitempty"` + PageTitle *string `yaml:"page_title,omitempty"` + SidebarCurrent *string `yaml:"sidebar_current,omitempty"` + Subcategory *string `yaml:"subcategory,omitempty"` +} + +// FrontMatterOptions represents configuration options for FrontMatter. 
+type FrontMatterOptions struct { + NoLayout bool + NoPageTitle bool + NoSidebarCurrent bool + NoSubcategory bool + RequireDescription bool + RequireLayout bool + RequirePageTitle bool +} + +func NewFrontMatterCheck(opts *FrontMatterOptions) *FrontMatterCheck { + check := &FrontMatterCheck{ + Options: opts, + } + + if check.Options == nil { + check.Options = &FrontMatterOptions{} + } + + return check +} + +func (check *FrontMatterCheck) Run(src []byte) error { + frontMatter := FrontMatterData{} + + md := goldmark.New( + goldmark.WithExtensions(&frontmatter.Extender{}), + ) + + ctx := parser.NewContext() + var buff bytes.Buffer + + err := md.Convert(src, &buff, parser.WithContext(ctx)) + if err != nil { + return err + } + d := frontmatter.Get(ctx) + if d == nil { + return fmt.Errorf("no frontmatter found") + } + + err = d.Decode(&frontMatter) + if err != nil { + return fmt.Errorf("error parsing YAML frontmatter: %w", err) + } + + if check.Options.NoLayout && frontMatter.Layout != nil { + return fmt.Errorf("YAML frontmatter should not contain layout") + } + + if check.Options.NoPageTitle && frontMatter.PageTitle != nil { + return fmt.Errorf("YAML frontmatter should not contain page_title") + } + + if check.Options.NoSidebarCurrent && frontMatter.SidebarCurrent != nil { + return fmt.Errorf("YAML frontmatter should not contain sidebar_current") + } + + if check.Options.NoSubcategory && frontMatter.Subcategory != nil { + return fmt.Errorf("YAML frontmatter should not contain subcategory") + } + + if check.Options.RequireDescription && frontMatter.Description == nil { + return fmt.Errorf("YAML frontmatter missing required description") + } + + if check.Options.RequireLayout && frontMatter.Layout == nil { + return fmt.Errorf("YAML frontmatter missing required layout") + } + + if check.Options.RequirePageTitle && frontMatter.PageTitle == nil { + return fmt.Errorf("YAML frontmatter missing required page_title") + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/provider_file.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/provider_file.go new file mode 100644 index 00000000..5358b669 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/check/provider_file.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package check + +import ( + "fmt" + "log" + "os" +) + +type ProviderFileOptions struct { + *FileOptions + + FrontMatter *FrontMatterOptions + ValidExtensions []string +} + +type ProviderFileCheck struct { + Options *ProviderFileOptions +} + +func NewProviderFileCheck(opts *ProviderFileOptions) *ProviderFileCheck { + check := &ProviderFileCheck{ + Options: opts, + } + + if check.Options == nil { + check.Options = &ProviderFileOptions{} + } + + if check.Options.FileOptions == nil { + check.Options.FileOptions = &FileOptions{} + } + + if check.Options.FrontMatter == nil { + check.Options.FrontMatter = &FrontMatterOptions{} + } + + return check +} + +func (check *ProviderFileCheck) Run(path string) error { + fullpath := check.Options.FullPath(path) + + log.Printf("[DEBUG] Checking file: %s", fullpath) + + if err := FileExtensionCheck(path, check.Options.ValidExtensions); err != nil { + return fmt.Errorf("%s: error checking file extension: %w", path, err) + } + + if err := FileSizeCheck(fullpath); err != nil { + return fmt.Errorf("%s: error checking file size: %w", path, err) + } + + content, err := os.ReadFile(fullpath) + + if err != nil { + return fmt.Errorf("%s: error reading file: %w", path, err) + } + + if err := NewFrontMatterCheck(check.Options.FrontMatter).Run(content); err != nil { + return fmt.Errorf("%s: error checking file frontmatter: %w", path, err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go index 1a0c7f15..77dbc96f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go @@ -72,7 +72,7 @@ func (cmd *generateCmd) Help() string { func (cmd *generateCmd) Flags() *flag.FlagSet { fs := flag.NewFlagSet("generate", flag.ExitOnError) - fs.StringVar(&cmd.flagProviderName, "provider-name", "", "provider name, as used in Terraform configurations") + fs.StringVar(&cmd.flagProviderName, "provider-name", "", "provider name, as used in Terraform configurations; defaults to the --provider-dir short name (after removing `terraform-provider-` prefix)") fs.StringVar(&cmd.flagProviderDir, "provider-dir", "", "relative or absolute path to the root provider code directory when running the command outside the root provider code directory") fs.StringVar(&cmd.flagProvidersSchema, "providers-schema", "", "path to the providers schema JSON file, which contains the output of the terraform providers schema -json command. Setting this flag will skip building the provider and calling Terraform CLI") fs.StringVar(&cmd.flagRenderedProviderName, "rendered-provider-name", "", "provider name, as generated in documentation (ex. 
page titles, ...)") diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/migrate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/migrate.go index 8248b9ec..14e39ec1 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/migrate.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/migrate.go @@ -17,6 +17,7 @@ type migrateCmd struct { flagProviderDir string flagTemplatesDir string flagExamplesDir string + flagProviderName string } func (cmd *migrateCmd) Synopsis() string { @@ -67,6 +68,8 @@ func (cmd *migrateCmd) Flags() *flag.FlagSet { fs.StringVar(&cmd.flagProviderDir, "provider-dir", "", "relative or absolute path to the root provider code directory; this will default to the current working directory if not set") fs.StringVar(&cmd.flagTemplatesDir, "templates-dir", "templates", "new website templates directory based on provider-dir; files will be migrated to this directory") fs.StringVar(&cmd.flagExamplesDir, "examples-dir", "examples", "examples directory based on provider-dir; extracted code examples will be migrated to this directory") + fs.StringVar(&cmd.flagProviderName, "provider-name", "", "provider name, as used in Terraform configurations; defaults to the --provider-dir short name (after removing `terraform-provider-` prefix)") + return fs } @@ -87,6 +90,7 @@ func (cmd *migrateCmd) runInternal() error { cmd.flagProviderDir, cmd.flagTemplatesDir, cmd.flagExamplesDir, + cmd.flagProviderName, ) if err != nil { return fmt.Errorf("unable to migrate website: %w", err) diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go index c4a406cf..55107f3c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go @@ -4,6 +4,7 @@ package cmd import ( + "errors" "flag" "fmt" "strings" @@ -13,10 +14,15 @@ import ( type validateCmd struct { commonCmd + + flagProviderName string + flagProviderDir string + flagProvidersSchema string + tfVersion string } func (cmd *validateCmd) Synopsis() string { - return "validates a plugin website for the current directory" + return "validates a plugin website" } func (cmd *validateCmd) Help() string { @@ -59,6 +65,10 @@ func (cmd *validateCmd) Help() string { func (cmd *validateCmd) Flags() *flag.FlagSet { fs := flag.NewFlagSet("validate", flag.ExitOnError) + fs.StringVar(&cmd.flagProviderName, "provider-name", "", "provider name, as used in Terraform configurations; defaults to the --provider-dir short name (after removing `terraform-provider-` prefix)") + fs.StringVar(&cmd.flagProviderDir, "provider-dir", "", "relative or absolute path to the root provider code directory; this will default to the current working directory if not set") + fs.StringVar(&cmd.flagProvidersSchema, "providers-schema", "", "path to the providers schema JSON file, which contains the output of the terraform providers schema -json command. Setting this flag will skip building the provider and calling Terraform CLI") + fs.StringVar(&cmd.tfVersion, "tf-version", "", "terraform binary version to download. If not provided, will look for a terraform binary in the local environment. 
If not found in the environment, will download the latest version of Terraform") return fs } @@ -74,9 +84,14 @@ func (cmd *validateCmd) Run(args []string) int { } func (cmd *validateCmd) runInternal() error { - err := provider.Validate(cmd.ui) + err := provider.Validate(cmd.ui, + cmd.flagProviderDir, + cmd.flagProviderName, + cmd.flagProvidersSchema, + cmd.tfVersion, + ) if err != nil { - return fmt.Errorf("unable to validate website: %w", err) + return errors.Join(errors.New("validation errors found: "), err) } return nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/functionmd/render.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/functionmd/render.go similarity index 97% rename from vendor/github.com/hashicorp/terraform-plugin-docs/functionmd/render.go rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/functionmd/render.go index fdea3a82..7b4951b7 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/functionmd/render.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/functionmd/render.go @@ -10,7 +10,7 @@ import ( tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-docs/schemamd" + "github.com/hashicorp/terraform-plugin-docs/internal/schemamd" ) // RenderArguments returns a Markdown formatted string of the function arguments. diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go index ded53b65..60a0ede9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go @@ -3,13 +3,25 @@ package mdplain -import "github.com/russross/blackfriday" +import ( + "bytes" -// Clean runs a VERY naive cleanup of markdown text to make it more palatable as plain text. -func PlainMarkdown(md string) (string, error) { - pt := &Text{} - - html := blackfriday.MarkdownOptions([]byte(md), pt, blackfriday.Options{}) + "github.com/yuin/goldmark" + "github.com/yuin/goldmark/extension" +) - return string(html), nil +// Clean runs a VERY naive cleanup of markdown text to make it more palatable as plain text. 
+func PlainMarkdown(markdown string) (string, error) { + var buf bytes.Buffer + extensions := []goldmark.Extender{ + extension.Linkify, + } + md := goldmark.New( + goldmark.WithExtensions(extensions...), + goldmark.WithRenderer(NewTextRenderer()), + ) + if err := md.Convert([]byte(markdown), &buf); err != nil { + return "", err + } + return buf.String(), nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go index 660cdae5..93bd5ec7 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go @@ -5,175 +5,101 @@ package mdplain import ( "bytes" + "io" - "github.com/russross/blackfriday" + "github.com/yuin/goldmark/ast" + extAST "github.com/yuin/goldmark/extension/ast" + "github.com/yuin/goldmark/renderer" ) -type Text struct{} - -func TextRenderer() blackfriday.Renderer { - return &Text{} -} - -func (options *Text) GetFlags() int { - return 0 -} - -func (options *Text) TitleBlock(out *bytes.Buffer, text []byte) { - text = bytes.TrimPrefix(text, []byte("% ")) - text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1) - out.Write(text) - out.WriteString("\n") -} - -func (options *Text) Header(out *bytes.Buffer, text func() bool, level int, id string) { - marker := out.Len() - doubleSpace(out) - - if !text() { - out.Truncate(marker) - return +type TextRender struct{} + +func NewTextRenderer() *TextRender { + return &TextRender{} +} + +func (r *TextRender) Render(w io.Writer, source []byte, n ast.Node) error { + out := bytes.NewBuffer([]byte{}) + err := ast.Walk(n, func(node ast.Node, entering bool) (ast.WalkStatus, error) { + if !entering || node.Type() == ast.TypeDocument { + return ast.WalkContinue, nil + } + + switch node := node.(type) { + case *ast.Blockquote, *ast.Heading: + doubleSpace(out) + out.Write(node.Text(source)) + return ast.WalkSkipChildren, nil + case *ast.ThematicBreak: + doubleSpace(out) + return ast.WalkSkipChildren, nil + case *ast.CodeBlock: + doubleSpace(out) + for i := 0; i < node.Lines().Len(); i++ { + line := node.Lines().At(i) + out.Write(line.Value(source)) + } + return ast.WalkSkipChildren, nil + case *ast.FencedCodeBlock: + doubleSpace(out) + doubleSpace(out) + for i := 0; i < node.Lines().Len(); i++ { + line := node.Lines().At(i) + _, _ = out.Write(line.Value(source)) + } + return ast.WalkSkipChildren, nil + case *ast.List: + doubleSpace(out) + return ast.WalkContinue, nil + case *ast.Paragraph: + doubleSpace(out) + if node.Text(source)[0] == '|' { // Write tables as-is. 
+ for i := 0; i < node.Lines().Len(); i++ { + line := node.Lines().At(i) + out.Write(line.Value(source)) + } + return ast.WalkSkipChildren, nil + } + return ast.WalkContinue, nil + case *extAST.Strikethrough: + out.Write(node.Text(source)) + return ast.WalkContinue, nil + case *ast.AutoLink: + out.Write(node.URL(source)) + return ast.WalkSkipChildren, nil + case *ast.CodeSpan: + out.Write(node.Text(source)) + return ast.WalkSkipChildren, nil + case *ast.Link: + _, err := out.Write(node.Text(source)) + if !isRelativeLink(node.Destination) { + out.WriteString(" ") + out.Write(node.Destination) + } + return ast.WalkSkipChildren, err + case *ast.Text: + out.Write(node.Text(source)) + if node.SoftLineBreak() { + doubleSpace(out) + } + return ast.WalkContinue, nil + case *ast.Image: + return ast.WalkSkipChildren, nil + + } + return ast.WalkContinue, nil + }) + if err != nil { + return err } -} - -func (options *Text) BlockHtml(out *bytes.Buffer, text []byte) { - doubleSpace(out) - out.Write(text) - out.WriteByte('\n') -} - -func (options *Text) HRule(out *bytes.Buffer) { - doubleSpace(out) -} - -func (options *Text) BlockCode(out *bytes.Buffer, text []byte, lang string) { - options.BlockCodeNormal(out, text, lang) -} - -func (options *Text) BlockCodeNormal(out *bytes.Buffer, text []byte, lang string) { - doubleSpace(out) - out.Write(text) -} - -func (options *Text) BlockQuote(out *bytes.Buffer, text []byte) { - doubleSpace(out) - out.Write(text) -} - -func (options *Text) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { - doubleSpace(out) - out.Write(header) - out.Write(body) -} - -func (options *Text) TableRow(out *bytes.Buffer, text []byte) { - doubleSpace(out) - out.Write(text) -} - -func (options *Text) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { - doubleSpace(out) - out.Write(text) -} - -func (options *Text) TableCell(out *bytes.Buffer, text []byte, align int) { - doubleSpace(out) - out.Write(text) -} - -func (options *Text) Footnotes(out *bytes.Buffer, text func() bool) { - options.HRule(out) - options.List(out, text, 0) -} - -func (options *Text) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { - out.Write(text) -} - -func (options *Text) List(out *bytes.Buffer, text func() bool, flags int) { - marker := out.Len() - doubleSpace(out) - - if !text() { - out.Truncate(marker) - return + _, err = w.Write(out.Bytes()) + if err != nil { + return err } + return nil } -func (options *Text) ListItem(out *bytes.Buffer, text []byte, flags int) { - out.Write(text) -} - -func (options *Text) Paragraph(out *bytes.Buffer, text func() bool) { - marker := out.Len() - doubleSpace(out) - - if !text() { - out.Truncate(marker) - return - } -} - -func (options *Text) AutoLink(out *bytes.Buffer, link []byte, kind int) { - out.Write(link) -} - -func (options *Text) CodeSpan(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -func (options *Text) DoubleEmphasis(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -func (options *Text) Emphasis(out *bytes.Buffer, text []byte) { - if len(text) == 0 { - return - } - out.Write(text) -} - -func (options *Text) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {} - -func (options *Text) LineBreak(out *bytes.Buffer) {} - -func (options *Text) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { - out.Write(content) - if !isRelativeLink(link) { - out.WriteString(" ") - out.Write(link) - } -} - -func (options *Text) RawHtmlTag(out *bytes.Buffer, text []byte) {} - -func (options 
*Text) TripleEmphasis(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -func (options *Text) StrikeThrough(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -func (options *Text) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {} - -func (options *Text) Entity(out *bytes.Buffer, entity []byte) { - out.Write(entity) -} - -func (options *Text) NormalText(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -func (options *Text) Smartypants(out *bytes.Buffer, text []byte) {} - -func (options *Text) DocumentHeader(out *bytes.Buffer) {} - -func (options *Text) DocumentFooter(out *bytes.Buffer) {} - -func (options *Text) TocHeader(text []byte, level int) {} - -func (options *Text) TocFinalize() {} +func (r *TextRender) AddOptions(...renderer.Option) {} func doubleSpace(out *bytes.Buffer) { if out.Len() > 0 { diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go index d0c53a54..d0c3c965 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go @@ -8,7 +8,6 @@ import ( "fmt" "os" "os/exec" - "path" "path/filepath" "runtime" "strings" @@ -455,7 +454,7 @@ func (g *generator) renderStaticWebsite(providerSchema *tfjson.ProviderSchema) e // Remove subdirectories managed by tfplugindocs if file.IsDir() && slices.Contains(managedWebsiteSubDirectories, file.Name()) { g.infof("removing directory: %q", file.Name()) - err = os.RemoveAll(path.Join(g.ProviderDocsDir(), file.Name())) + err = os.RemoveAll(filepath.Join(g.ProviderDocsDir(), file.Name())) if err != nil { return fmt.Errorf("unable to remove directory %q from rendered website directory: %w", file.Name(), err) } @@ -465,7 +464,7 @@ func (g *generator) renderStaticWebsite(providerSchema *tfjson.ProviderSchema) e // Remove files managed by tfplugindocs if !file.IsDir() && slices.Contains(managedWebsiteFiles, file.Name()) { g.infof("removing file: %q", file.Name()) - err = os.RemoveAll(path.Join(g.ProviderDocsDir(), file.Name())) + err = os.RemoveAll(filepath.Join(g.ProviderDocsDir(), file.Name())) if err != nil { return fmt.Errorf("unable to remove file %q from rendered website directory: %w", file.Name(), err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go new file mode 100644 index 00000000..366812bc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
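For context, a minimal standalone sketch (not part of the patch) of the goldmark custom-renderer pattern that the new TextRender uses: a type implementing renderer.Renderer's Render and AddOptions methods is plugged into goldmark.New via goldmark.WithRenderer, and Render drives the parsed AST with ast.Walk. The toy renderer below emits only heading text; names like headingsOnly are invented for illustration.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/renderer"
)

// headingsOnly walks the AST and emits only heading text, skipping
// everything else, in the spirit of TextRender above.
type headingsOnly struct{}

func (headingsOnly) AddOptions(...renderer.Option) {}

func (headingsOnly) Render(w io.Writer, source []byte, n ast.Node) error {
	return ast.Walk(n, func(node ast.Node, entering bool) (ast.WalkStatus, error) {
		if h, ok := node.(*ast.Heading); ok && entering {
			// Text extracts the node's raw text content from source.
			if _, err := w.Write(append(h.Text(source), '\n')); err != nil {
				return ast.WalkStop, err
			}
			return ast.WalkSkipChildren, nil
		}
		return ast.WalkContinue, nil
	})
}

func main() {
	md := goldmark.New(goldmark.WithRenderer(headingsOnly{}))

	var buf bytes.Buffer
	if err := md.Convert([]byte("# Title\n\nbody text\n\n## Section"), &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // prints "Title" and "Section", one per line
}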
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go
index d0c53a54..d0c3c965 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
-	"path"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -455,7 +454,7 @@ func (g *generator) renderStaticWebsite(providerSchema *tfjson.ProviderSchema) e
 		// Remove subdirectories managed by tfplugindocs
 		if file.IsDir() && slices.Contains(managedWebsiteSubDirectories, file.Name()) {
 			g.infof("removing directory: %q", file.Name())
-			err = os.RemoveAll(path.Join(g.ProviderDocsDir(), file.Name()))
+			err = os.RemoveAll(filepath.Join(g.ProviderDocsDir(), file.Name()))
 			if err != nil {
 				return fmt.Errorf("unable to remove directory %q from rendered website directory: %w", file.Name(), err)
 			}
@@ -465,7 +464,7 @@ func (g *generator) renderStaticWebsite(providerSchema *tfjson.ProviderSchema) e
 		// Remove files managed by tfplugindocs
 		if !file.IsDir() && slices.Contains(managedWebsiteFiles, file.Name()) {
 			g.infof("removing file: %q", file.Name())
-			err = os.RemoveAll(path.Join(g.ProviderDocsDir(), file.Name()))
+			err = os.RemoveAll(filepath.Join(g.ProviderDocsDir(), file.Name()))
 			if err != nil {
 				return fmt.Errorf("unable to remove file %q from rendered website directory: %w", file.Name(), err)
 			}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go
new file mode 100644
index 00000000..366812bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/logger.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package provider
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/cli"
+)
+
+type Logger struct {
+	ui cli.Ui
+}
+
+func NewLogger(ui cli.Ui) *Logger {
+	return &Logger{ui}
+}
+
+func (l *Logger) infof(format string, args ...interface{}) {
+	l.ui.Info(fmt.Sprintf(format, args...))
+}
+
+//nolint:unused
+func (l *Logger) warnf(format string, args ...interface{}) {
+	l.ui.Warn(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/migrate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/migrate.go
index e8a77f46..babe2127 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/migrate.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/migrate.go
@@ -27,6 +27,8 @@ type migrator struct {
 	templatesDir string
 	examplesDir  string
 
+	providerName string
+
 	ui cli.Ui
 }
 
@@ -38,7 +40,7 @@ func (m *migrator) warnf(format string, a ...interface{}) {
 	m.ui.Warn(fmt.Sprintf(format, a...))
 }
 
-func Migrate(ui cli.Ui, providerDir string, templatesDir string, examplesDir string) error {
+func Migrate(ui cli.Ui, providerDir string, templatesDir string, examplesDir string, providerName string) error {
 	// Ensure provider directory is resolved absolute path
 	if providerDir == "" {
 		wd, err := os.Getwd()
@@ -69,6 +71,11 @@ func Migrate(ui cli.Ui, providerDir string, templatesDir string, examplesDir str
 		return fmt.Errorf("expected %q to be a directory", providerDir)
 	}
 
+	// Default providerName to provider directory name
+	if providerName == "" {
+		providerName = filepath.Base(providerDir)
+	}
+
 	// Determine website directory
 	websiteDir, err := determineWebsiteDir(providerDir)
 	if err != nil {
@@ -80,6 +87,7 @@ func Migrate(ui cli.Ui, providerDir string, templatesDir string, examplesDir str
 		templatesDir: templatesDir,
 		examplesDir:  examplesDir,
 		websiteDir:   websiteDir,
+		providerName: providerName,
 		ui:           ui,
 	}
 
@@ -172,28 +180,43 @@ func (m *migrator) MigrateTemplate(relDir string) fs.WalkDirFunc {
 		}
 
 		baseName, _, _ := strings.Cut(d.Name(), ".")
+		shortName := providerShortName(m.providerName)
+		fileName := strings.TrimPrefix(baseName, shortName+"_")
+
 		var exampleRelDir string
-		if baseName == "index" {
+		if fileName == "index" {
 			exampleRelDir = relDir
 		} else {
-			exampleRelDir = filepath.Join(relDir, baseName)
+			exampleRelDir = filepath.Join(relDir, fileName)
 		}
-		templateFilePath := filepath.Join(m.ProviderTemplatesDir(), relDir, baseName+".md.tmpl")
+		templateFilePath := filepath.Join(m.ProviderTemplatesDir(), relDir, fileName+".md.tmpl")
 
 		err = os.MkdirAll(filepath.Dir(templateFilePath), 0755)
 		if err != nil {
 			return fmt.Errorf("unable to create directory %q: %w", templateFilePath, err)
 		}
 
+		templateFile, err := os.OpenFile(templateFilePath, os.O_WRONLY|os.O_CREATE, 0600)
+
+		if err != nil {
+			return fmt.Errorf("unable to open file %q: %w", templateFilePath, err)
+		}
+
+		defer func(f *os.File) {
+			err := f.Close()
+			if err != nil {
+				m.warnf("unable to close file %q: %q", f.Name(), err)
+			}
+		}(templateFile)
+
 		m.infof("extracting YAML frontmatter to %q", templateFilePath)
-		err = m.ExtractFrontMatter(data, relDir, templateFilePath)
+		err = m.ExtractFrontMatter(data, relDir, templateFile)
 		if err != nil {
 			return fmt.Errorf("unable to extract front matter to %q: %w", templateFilePath, err)
 		}
 
 		m.infof("extracting code examples from %q", d.Name())
-		err = m.ExtractCodeExamples(data, exampleRelDir, templateFilePath)
+		err = m.ExtractCodeExamples(data, exampleRelDir, templateFile)
 		if err != nil {
 			return fmt.Errorf("unable to extract code examples from %q: %w", templateFilePath, err)
 		}
@@ -203,29 +226,18 @@
 }
 
-func (m *migrator) ExtractFrontMatter(content []byte, relDir string, templateFilePath string) error {
-	templateFile, err := os.OpenFile(templateFilePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
-	if err != nil {
-		return fmt.Errorf("unable to open file %q: %w", templateFilePath, err)
-	}
-	defer func(f *os.File) {
-		err := f.Close()
-		if err != nil {
-			m.warnf("unable to close file %q: %q", templateFilePath, err)
-		}
-	}(templateFile)
-
+func (m *migrator) ExtractFrontMatter(content []byte, relDir string, templateFile *os.File) error {
 	fileScanner := bufio.NewScanner(bytes.NewReader(content))
 	fileScanner.Split(bufio.ScanLines)
 
 	hasFirstLine := fileScanner.Scan()
 	if !hasFirstLine || fileScanner.Text() != "---" {
-		m.warnf("no frontmatter found in %q", templateFilePath)
+		m.warnf("no frontmatter found in %q", templateFile.Name())
 		return nil
 	}
-	_, err = templateFile.WriteString(fileScanner.Text() + "\n")
+	_, err := templateFile.WriteString(fileScanner.Text() + "\n")
 	if err != nil {
-		return fmt.Errorf("unable to append frontmatter to %q: %w", templateFilePath, err)
+		return fmt.Errorf("unable to append frontmatter to %q: %w", templateFile.Name(), err)
 	}
 	exited := false
 	for fileScanner.Scan() {
@@ -235,7 +247,7 @@ func (m *migrator) ExtractFrontMatter(content []byte, relDir string, templateFil
 		}
 		_, err = templateFile.WriteString(fileScanner.Text() + "\n")
 		if err != nil {
-			return fmt.Errorf("unable to append frontmatter to %q: %w", templateFilePath, err)
+			return fmt.Errorf("unable to append frontmatter to %q: %w", templateFile.Name(), err)
 		}
 		if fileScanner.Text() == "---" {
 			exited = true
@@ -244,7 +256,7 @@ func (m *migrator) ExtractFrontMatter(content []byte, relDir string, templateFil
 	}
 
 	if !exited {
-		return fmt.Errorf("cannot find ending of frontmatter block in %q", templateFilePath)
+		return fmt.Errorf("cannot find ending of frontmatter block in %q", templateFile.Name())
 	}
 
 	// add comment to end of front matter briefly explaining template functionality
@@ -254,24 +266,13 @@ func (m *migrator) ExtractFrontMatter(content []byte, relDir string, templateFil
 		_, err = templateFile.WriteString(migrateProviderTemplateComment + "\n")
 	}
 	if err != nil {
-		return fmt.Errorf("unable to append template comment to %q: %w", templateFilePath, err)
+		return fmt.Errorf("unable to append template comment to %q: %w", templateFile.Name(), err)
 	}
 
 	return nil
 }
 
-func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templateFilePath string) error {
-	templateFile, err := os.OpenFile(templateFilePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
-	if err != nil {
-		return fmt.Errorf("unable to open file %q: %w", templateFilePath, err)
-	}
-	defer func(f *os.File) {
-		err := f.Close()
-		if err != nil {
-			m.warnf("unable to close file %q: %q", templateFilePath, err)
-		}
-	}(templateFile)
-
+func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templateFile *os.File) error {
 	md := newMarkdownRenderer()
 	p := md.Parser()
 	root := p.Parse(text.NewReader(content))
@@ -279,7 +280,7 @@ func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templat
 	exampleCount := 0
 	importCount := 0
 
-	err = ast.Walk(root, func(node ast.Node, enter bool) (ast.WalkStatus, error) {
+	err := ast.Walk(root, func(node ast.Node, enter bool) (ast.WalkStatus, error) {
 		// skip the root node
 		if !enter || node.Type() == ast.TypeDocument {
 			return ast.WalkContinue, nil
@@ -294,20 +295,20 @@ func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templat
 			exampleCount++
 			ext = ".tf"
 			exampleName = "example_" + strconv.Itoa(exampleCount) + ext
-			examplePath = filepath.Join(m.ProviderExamplesDir(), newRelDir, exampleName)
+			examplePath = filepath.Join(m.examplesDir, newRelDir, exampleName)
 			template = fmt.Sprintf("{{tffile \"%s\"}}", examplePath)
-			m.infof("creating example file %q", examplePath)
+			m.infof("creating example file %q", filepath.Join(m.providerDir, examplePath))
 		case "console":
 			importCount++
 			ext = ".sh"
 			exampleName = "import_" + strconv.Itoa(importCount) + ext
-			examplePath = filepath.Join(m.ProviderExamplesDir(), newRelDir, exampleName)
+			examplePath = filepath.Join(m.examplesDir, newRelDir, exampleName)
 			template = fmt.Sprintf("{{codefile \"shell\" \"%s\"}}", examplePath)
-			m.infof("creating import file %q", examplePath)
+			m.infof("creating import file %q", filepath.Join(m.providerDir, examplePath))
 		default:
 			// Render node as is
 			m.infof("skipping code block with unknown language %q", lang)
-			err = md.Renderer().Render(templateFile, content, node)
+			err := md.Renderer().Render(templateFile, content, node)
 			if err != nil {
 				return ast.WalkStop, fmt.Errorf("unable to render node: %w", err)
 			}
@@ -322,7 +323,7 @@ func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templat
 		}
 
 		// create example file from code block
-		err = writeFile(examplePath, codeBuf.String())
+		err := writeFile(examplePath, codeBuf.String())
 		if err != nil {
 			return ast.WalkStop, fmt.Errorf("unable to write file %q: %w", examplePath, err)
 		}
@@ -337,7 +338,7 @@ func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templat
 		}
 
 		// Render non-code nodes as is
-		err = md.Renderer().Render(templateFile, content, node)
+		err := md.Renderer().Render(templateFile, content, node)
 		if err != nil {
 			return ast.WalkStop, fmt.Errorf("unable to render node: %w", err)
 		}
@@ -353,9 +354,9 @@ func (m *migrator) ExtractCodeExamples(content []byte, newRelDir string, templat
 
 	_, err = templateFile.WriteString("\n")
 	if err != nil {
-		return fmt.Errorf("unable to write to template %q: %w", templateFilePath, err)
+		return fmt.Errorf("unable to write to template %q: %w", templateFile.Name(), err)
 	}
 
-	m.infof("finished creating template %q", templateFilePath)
+	m.infof("finished creating template %q", templateFile.Name())
 
 	return nil
 }
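For context, the frontmatter extraction above is a plain line scanner: the first line must be "---", and lines are copied until a closing "---" is seen. A minimal standalone sketch of that loop (illustrative only, not part of the patch; the helper name and sample document are made up):

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// extractFrontMatter mirrors the scanner loop in ExtractFrontMatter: it
// returns the leading YAML block delimited by "---" lines, or an error if
// the block never opens or never closes.
func extractFrontMatter(content []byte) (string, error) {
	scanner := bufio.NewScanner(bytes.NewReader(content))

	if !scanner.Scan() || scanner.Text() != "---" {
		return "", fmt.Errorf("no frontmatter found")
	}

	block := scanner.Text() + "\n"
	for scanner.Scan() {
		block += scanner.Text() + "\n"
		if scanner.Text() == "---" {
			return block, nil
		}
	}

	return "", fmt.Errorf("cannot find ending of frontmatter block")
}

func main() {
	doc := []byte("---\npage_title: \"example_thing Resource\"\n---\n\n# example_thing\n")

	frontMatter, err := extractFrontMatter(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(frontMatter)
}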
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/schema.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/schema.go
new file mode 100644
index 00000000..3338fe98
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/schema.go
@@ -0,0 +1,136 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+
+	"github.com/hashicorp/go-version"
+	install "github.com/hashicorp/hc-install"
+	"github.com/hashicorp/hc-install/checkpoint"
+	"github.com/hashicorp/hc-install/fs"
+	"github.com/hashicorp/hc-install/product"
+	"github.com/hashicorp/hc-install/releases"
+	"github.com/hashicorp/hc-install/src"
+	"github.com/hashicorp/terraform-exec/tfexec"
+	tfjson "github.com/hashicorp/terraform-json"
+)
+
+func TerraformProviderSchemaFromTerraform(ctx context.Context, providerName, providerDir, tfVersion string, l *Logger) (*tfjson.ProviderSchema, error) {
+	var err error
+
+	shortName := providerShortName(providerName)
+
+	tmpDir, err := os.MkdirTemp("", "tfws")
+	if err != nil {
+		return nil, fmt.Errorf("unable to create temporary provider install directory %q: %w", tmpDir, err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	l.infof("compiling provider %q", shortName)
+	providerPath := fmt.Sprintf("plugins/registry.terraform.io/hashicorp/%s/0.0.1/%s_%s", shortName, runtime.GOOS, runtime.GOARCH)
+	outFile := filepath.Join(tmpDir, providerPath, fmt.Sprintf("terraform-provider-%s", shortName))
+	switch runtime.GOOS {
+	case "windows":
+		outFile = outFile + ".exe"
+	}
+	buildCmd := exec.Command("go", "build", "-o", outFile)
+	buildCmd.Dir = providerDir
+	// TODO: constrain env here to make it a little safer?
+	_, err = runCmd(buildCmd)
+	if err != nil {
+		return nil, fmt.Errorf("unable to execute go build command: %w", err)
+	}
+
+	err = writeFile(filepath.Join(tmpDir, "provider.tf"), fmt.Sprintf(`
+provider %[1]q {
+}
+`, shortName))
+	if err != nil {
+		return nil, fmt.Errorf("unable to write provider.tf file: %w", err)
+	}
+
+	i := install.NewInstaller()
+	var sources []src.Source
+	if tfVersion != "" {
+		l.infof("downloading Terraform CLI binary version from releases.hashicorp.com: %s", tfVersion)
+		sources = []src.Source{
+			&releases.ExactVersion{
+				Product:    product.Terraform,
+				Version:    version.Must(version.NewVersion(tfVersion)),
+				InstallDir: tmpDir,
+			},
+		}
+	} else {
+		l.infof("using Terraform CLI binary from PATH if available, otherwise downloading latest Terraform CLI binary")
+		sources = []src.Source{
+			&fs.AnyVersion{
+				Product: &product.Terraform,
+			},
+			&checkpoint.LatestVersion{
+				InstallDir: tmpDir,
+				Product:    product.Terraform,
+			},
+		}
+	}
+
+	tfBin, err := i.Ensure(context.Background(), sources)
+	if err != nil {
+		return nil, fmt.Errorf("unable to download Terraform binary: %w", err)
+	}
+
+	tf, err := tfexec.NewTerraform(tmpDir, tfBin)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create new terraform exec instance: %w", err)
+	}
+
+	l.infof("running terraform init")
+	err = tf.Init(ctx, tfexec.Get(false), tfexec.PluginDir("./plugins"))
+	if err != nil {
+		return nil, fmt.Errorf("unable to run terraform init on provider: %w", err)
+	}
+
+	l.infof("getting provider schema")
+	schemas, err := tf.ProvidersSchema(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("unable to retrieve provider schema from terraform exec: %w", err)
+	}
+
+	if ps, ok := schemas.Schemas[shortName]; ok {
+		return ps, nil
+	}
+
+	if ps, ok := schemas.Schemas["registry.terraform.io/hashicorp/"+shortName]; ok {
+		return ps, nil
+	}
+
+	return nil, fmt.Errorf("unable to find schema in JSON for provider %q", shortName)
+}
+
+func TerraformProviderSchemaFromFile(providerName, providersSchemaPath string, l *Logger) (*tfjson.ProviderSchema, error) {
+	var err error
+
+	shortName := providerShortName(providerName)
+
+	l.infof("getting provider schema")
+	schemas, err := extractSchemaFromFile(providersSchemaPath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to retrieve provider schema from JSON file: %w", err)
+	}
+
+	if ps, ok := schemas.Schemas[shortName]; ok {
+		return ps, nil
+	}
+
+	if ps, ok := schemas.Schemas["registry.terraform.io/hashicorp/"+shortName]; ok {
+		return ps, nil
+	}
+
+	return nil, fmt.Errorf("unable to find schema in JSON for provider %q", shortName)
+}
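For context, a minimal standalone sketch of the hc-install flow used above. Ensure tries the sources in order, so a terraform binary already on PATH wins before any download; the types and fields are the same ones the new schema.go uses, while the pinned version string "1.8.5" is an arbitrary example (illustrative only, not part of the patch):

package main

import (
	"context"
	"log"

	"github.com/hashicorp/go-version"
	install "github.com/hashicorp/hc-install"
	"github.com/hashicorp/hc-install/fs"
	"github.com/hashicorp/hc-install/product"
	"github.com/hashicorp/hc-install/releases"
	"github.com/hashicorp/hc-install/src"
)

func main() {
	i := install.NewInstaller()

	// Prefer a terraform binary already on PATH, then fall back to
	// downloading a pinned release.
	tfBin, err := i.Ensure(context.Background(), []src.Source{
		&fs.AnyVersion{
			Product: &product.Terraform,
		},
		&releases.ExactVersion{
			Product: product.Terraform,
			Version: version.Must(version.NewVersion("1.8.5")),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("terraform binary available at: %s", tfBin)
}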
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go
index 10f2e655..58766489 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go
@@ -16,10 +16,11 @@ import (
 
 	tfjson "github.com/hashicorp/terraform-json"
 
-	"github.com/hashicorp/terraform-plugin-docs/functionmd"
+	"github.com/hashicorp/terraform-plugin-docs/internal/schemamd"
+
+	"github.com/hashicorp/terraform-plugin-docs/internal/functionmd"
 	"github.com/hashicorp/terraform-plugin-docs/internal/mdplain"
 	"github.com/hashicorp/terraform-plugin-docs/internal/tmplfuncs"
-	"github.com/hashicorp/terraform-plugin-docs/schemamd"
 )
 
 const (
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go
index 21063dc8..7a3ec336 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go
@@ -86,9 +86,13 @@ func resourceSchema(schemas map[string]*tfjson.Schema, providerShortName, templa
 
 func writeFile(path string, data string) error {
 	dir, _ := filepath.Split(path)
-	err := os.MkdirAll(dir, 0755)
-	if err != nil {
-		return fmt.Errorf("unable to make dir %q: %w", dir, err)
+
+	var err error
+	if dir != "" {
+		err = os.MkdirAll(dir, 0755)
+		if err != nil {
+			return fmt.Errorf("unable to make dir %q: %w", dir, err)
+		}
 	}
 
 	err = os.WriteFile(path, []byte(data), 0644)
@@ -99,6 +103,7 @@ func writeFile(path string, data string) error {
 	return nil
 }
 
+//nolint:unparam
 func runCmd(cmd *exec.Cmd) ([]byte, error) {
 	output, err := cmd.CombinedOutput()
 	if err != nil {
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go
index c93ff3ec..a72be373 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go
@@ -4,269 +4,363 @@
 package provider
 
 import (
+	"context"
+	"errors"
 	"fmt"
+	"log"
 	"os"
 	"path/filepath"
-	"strings"
 
+	"github.com/bmatcuk/doublestar/v4"
 	"github.com/hashicorp/cli"
+	tfjson "github.com/hashicorp/terraform-json"
+
+	"github.com/hashicorp/terraform-plugin-docs/internal/check"
 )
 
-func Validate(ui cli.Ui) error {
-	dirExists := func(name string) bool {
-		if _, err := os.Stat(name); err != nil {
-			return false
-		}
+const (
+	FileExtensionHtmlMarkdown = `.html.markdown`
+	FileExtensionHtmlMd       = `.html.md`
+	FileExtensionMarkdown     = `.markdown`
+	FileExtensionMd           = `.md`
 
-		return true
-	}
+	DocumentationGlobPattern    = `{docs/index.md,docs/{,cdktf/}{data-sources,guides,resources,functions}/**/*,website/docs/**/*}`
+	DocumentationDirGlobPattern = `{docs/{,cdktf/}{data-sources,guides,resources,functions}{,/*},website/docs/**/*}`
+)
 
-	switch {
-	default:
-		ui.Warn("no website detected, exiting")
-	case dirExists("templates"):
-		ui.Info("detected templates directory, running checks...")
-		err := validateTemplates(ui, "templates")
-		if err != nil {
-			return err
-		}
-		if dirExists("examples") {
-			ui.Info("detected examples directory for templates, running checks...")
-			err = validateExamples(ui, "examples")
-			if err != nil {
-				return err
-			}
-		}
-		return err
-	case dirExists("docs"):
-		ui.Info("detected static docs directory, running checks")
-		return validateStaticDocs(ui, "docs")
-	case dirExists("website"):
-		ui.Info("detected legacy website directory, running checks")
-		return validateLegacyWebsite(ui, "website")
-	}
+var ValidLegacyFileExtensions = []string{
+	FileExtensionHtmlMarkdown,
+	FileExtensionHtmlMd,
+	FileExtensionMarkdown,
+	FileExtensionMd,
+}
 
-	return nil
+var ValidRegistryFileExtensions = []string{
+	FileExtensionMd,
 }
 
-func validateExamples(ui cli.Ui, dir string) error {
-	return nil
+var LegacyFrontMatterOptions = &check.FrontMatterOptions{
+	NoSidebarCurrent:   true,
+	RequireDescription: true,
+	RequireLayout:      true,
+	RequirePageTitle:   true,
 }
 
-func validateTemplates(ui cli.Ui, dir string) error {
-	checks := []check{
-		checkAllowedFiles(
-			"index.md",
-			"index.md.tmpl",
-		),
-		checkAllowedDirs(
-			"data-sources",
-			"guides",
-			"functions",
-			"resources",
-		),
-		checkBlockedExtensions(
-			".html.md.tmpl",
-		),
-		checkAllowedExtensions(
-			".md",
-			".md.tmpl",
-		),
-	}
-	issues := []issue{}
-	for _, c := range checks {
-		checkIssues, err := c(dir)
+var LegacyIndexFrontMatterOptions = &check.FrontMatterOptions{
+	NoSidebarCurrent:   true,
+	NoSubcategory:      true,
+	RequireDescription: true,
+	RequireLayout:      true,
+	RequirePageTitle:   true,
+}
+
+var LegacyGuideFrontMatterOptions = &check.FrontMatterOptions{
+	NoSidebarCurrent:   true,
+	RequireDescription: true,
+	RequireLayout:      true,
+	RequirePageTitle:   true,
+}
+
+var RegistryFrontMatterOptions = &check.FrontMatterOptions{
+	NoLayout:         true,
+	NoSidebarCurrent: true,
+}
+
+var RegistryIndexFrontMatterOptions = &check.FrontMatterOptions{
+	NoLayout:         true,
+	NoSidebarCurrent: true,
+	NoSubcategory:    true,
+}
+
+var RegistryGuideFrontMatterOptions = &check.FrontMatterOptions{
+	NoLayout:         true,
+	NoSidebarCurrent: true,
+	RequirePageTitle: true,
+}
+
+type validator struct {
+	providerName        string
+	providerDir         string
+	providersSchemaPath string
+
+	tfVersion      string
+	providerSchema *tfjson.ProviderSchema
+
+	logger *Logger
+}
+
+func Validate(ui cli.Ui, providerDir, providerName, providersSchemaPath, tfversion string) error {
+	// Ensure provider directory is resolved absolute path
+	if providerDir == "" {
+		wd, err := os.Getwd()
+
 		if err != nil {
-			return err
+			return fmt.Errorf("error getting working directory: %w", err)
+		}
+
+		providerDir = wd
+	} else {
+		absProviderDir, err := filepath.Abs(providerDir)
+
+		if err != nil {
+			return fmt.Errorf("error getting absolute path with provider directory %q: %w", providerDir, err)
 		}
-		issues = append(issues, checkIssues...)
+
+		providerDir = absProviderDir
 	}
-	for _, issue := range issues {
-		ui.Warn(fmt.Sprintf("%s: %s", issue.file, issue.message))
+
+	// Verify provider directory
+	providerDirFileInfo, err := os.Stat(providerDir)
+
+	if err != nil {
+		return fmt.Errorf("error getting information for provider directory %q: %w", providerDir, err)
+	}
+
+	if !providerDirFileInfo.IsDir() {
+		return fmt.Errorf("expected %q to be a directory", providerDir)
 	}
-	if len(issues) > 0 {
-		return fmt.Errorf("invalid templates directory")
+
+	v := &validator{
+		providerName:        providerName,
+		providerDir:         providerDir,
+		providersSchemaPath: providersSchemaPath,
+		tfVersion:           tfversion,
+
+		logger: NewLogger(ui),
 	}
-	return nil
+
+	ctx := context.Background()
+
+	return v.validate(ctx)
 }
 
-func validateStaticDocs(ui cli.Ui, dir string) error {
-	checks := []check{
-		checkAllowedFiles(
-			"index.md",
-		),
-		checkAllowedDirs(
-			"data-sources",
-			"guides",
-			"functions",
-			"resources",
-			"cdktf",
-		),
-		checkBlockedExtensions(
-			".html.md.tmpl",
-			".html.md",
-			".md.tmpl",
-		),
-		checkAllowedExtensions(
-			".md",
-		),
+func (v *validator) validate(ctx context.Context) error {
+	var result error
+
+	var err error
+
+	if v.providerName == "" {
+		v.providerName = filepath.Base(v.providerDir)
 	}
-	issues := []issue{}
-	for _, c := range checks {
-		checkIssues, err := c(dir)
+
+	if v.providersSchemaPath == "" {
+		v.logger.infof("exporting schema from Terraform")
+		v.providerSchema, err = TerraformProviderSchemaFromTerraform(ctx, v.providerName, v.providerDir, v.tfVersion, v.logger)
 		if err != nil {
-			return err
+			return fmt.Errorf("error exporting provider schema from Terraform: %w", err)
+		}
+	} else {
+		v.logger.infof("exporting schema from JSON file")
+		v.providerSchema, err = TerraformProviderSchemaFromFile(v.providerName, v.providersSchemaPath, v.logger)
+		if err != nil {
+			return fmt.Errorf("error exporting provider schema from JSON file: %w", err)
 		}
-		issues = append(issues, checkIssues...)
 	}
-	for _, issue := range issues {
-		ui.Warn(fmt.Sprintf("%s: %s", issue.file, issue.message))
+
+	providerFs := os.DirFS(v.providerDir)
+
+	files, globErr := doublestar.Glob(providerFs, DocumentationGlobPattern)
+	if globErr != nil {
+		return fmt.Errorf("error finding documentation files: %w", globErr)
 	}
-	if len(issues) > 0 {
-		return fmt.Errorf("invalid templates directory")
+
+	log.Printf("[DEBUG] Found documentation files %v", files)
+
+	v.logger.infof("running mixed directories check")
+	err = check.MixedDirectoriesCheck(files)
+	result = errors.Join(result, err)
+
+	if dirExists(filepath.Join(v.providerDir, "docs")) {
+		v.logger.infof("detected static docs directory, running checks")
+		err = v.validateStaticDocs(filepath.Join(v.providerDir, "docs"))
+		result = errors.Join(result, err)
+
+	}
+	if dirExists(filepath.Join(v.providerDir, filepath.Join("website", "docs"))) {
+		v.logger.infof("detected legacy website directory, running checks")
+		err = v.validateLegacyWebsite(filepath.Join(v.providerDir, "website/docs"))
+		result = errors.Join(result, err)
 	}
-	return nil
-}
 
-func validateLegacyWebsite(ui cli.Ui, dir string) error {
-	panic("not implemented")
+	return result
 }
 
-type issue struct {
-	file    string
-	message string
-}
+func (v *validator) validateStaticDocs(dir string) error {
 
-type check func(dir string) ([]issue, error)
+	var result error
 
-func checkBlockedExtensions(exts ...string) check {
-	return func(dir string) ([]issue, error) {
-		issues := []issue{}
-		err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-			if err != nil {
-				return err
-			}
-			if info.IsDir() {
-				return nil
-			}
-			for _, ext := range exts {
-				if strings.HasSuffix(path, ext) {
-					_, file := filepath.Split(path)
-					issues = append(issues, issue{
-						file:    path,
-						message: fmt.Sprintf("the extension for %q is not supported", file),
-					})
-					break
-				}
-			}
-			return nil
-		})
+	options := &check.ProviderFileOptions{
+		FrontMatter:     RegistryFrontMatterOptions,
+		ValidExtensions: ValidRegistryFileExtensions,
+	}
+
+	var files []string
+
+	err := filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
 		if err != nil {
-			return nil, err
+			return fmt.Errorf("error walking directory %q: %w", dir, err)
 		}
-		return issues, nil
-	}
-}
 
-func checkAllowedExtensions(exts ...string) check {
-	return func(dir string) ([]issue, error) {
-		issues := []issue{}
-		err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		rel, err := filepath.Rel(v.providerDir, path)
+		if err != nil {
+			return err
+		}
+		if d.IsDir() {
+			match, err := doublestar.PathMatch(filepath.FromSlash(DocumentationDirGlobPattern), rel)
 			if err != nil {
 				return err
 			}
-			if info.IsDir() {
-				return nil
-			}
-			valid := false
-			for _, ext := range exts {
-				if strings.HasSuffix(path, ext) {
-					valid = true
-					break
-				}
-			}
-			if !valid {
-				_, file := filepath.Split(path)
-				issues = append(issues, issue{
-					file:    path,
-					message: fmt.Sprintf("the extension for %q is not expected", file),
-				})
+			if !match {
+				return nil // skip valid non-documentation directories
 			}
+
+			v.logger.infof("running invalid directories check on %s", rel)
+			result = errors.Join(result, check.InvalidDirectoriesCheck(rel))
 			return nil
-		})
+		}
+		match, err := doublestar.PathMatch(filepath.FromSlash(DocumentationGlobPattern), rel)
 		if err != nil {
-			return nil, err
+			return err
+		}
+		if !match {
+			return nil // skip valid non-documentation files
 		}
-		return issues, nil
+
+		// Configure FrontMatterOptions based on file type
+		if d.Name() == "index.md" {
+			options.FrontMatter = RegistryIndexFrontMatterOptions
+		} else if _, relErr := filepath.Rel(rel, "guides"); relErr != nil {
+			options.FrontMatter = RegistryGuideFrontMatterOptions
+		} else {
+			options.FrontMatter = RegistryFrontMatterOptions
+		}
+		v.logger.infof("running file checks on %s", rel)
+		result = errors.Join(result, check.NewProviderFileCheck(options).Run(path))
+
+		files = append(files, path)
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("error walking directory %q: %w", dir, err)
 	}
-}
 
-func checkAllowedDirs(dirs ...string) check {
-	allowedDirs := map[string]bool{}
-	for _, d := range dirs {
-		allowedDirs[d] = true
+	mismatchOpt := &check.FileMismatchOptions{
+		ProviderShortName: providerShortName(v.providerName),
+		Schema:            v.providerSchema,
+	}
+
+	if dirExists(filepath.Join(dir, "data-sources")) {
+		dataSourceFiles, _ := os.ReadDir(filepath.Join(dir, "data-sources"))
+		mismatchOpt.DatasourceEntries = dataSourceFiles
+	}
+	if dirExists(filepath.Join(dir, "resources")) {
+		resourceFiles, _ := os.ReadDir(filepath.Join(dir, "resources"))
+		mismatchOpt.ResourceEntries = resourceFiles
+	}
+	if dirExists(filepath.Join(dir, "functions")) {
+		functionFiles, _ := os.ReadDir(filepath.Join(dir, "functions"))
+		mismatchOpt.FunctionEntries = functionFiles
 	}
 
-	return func(dir string) ([]issue, error) {
-		issues := []issue{}
+	v.logger.infof("running file mismatch check")
+	if err := check.NewFileMismatchCheck(mismatchOpt).Run(); err != nil {
+		result = errors.Join(result, err)
+	}
+
+	return result
+}
+
+func (v *validator) validateLegacyWebsite(dir string) error {
+
+	var result error
+
+	options := &check.ProviderFileOptions{
+		FrontMatter:     LegacyFrontMatterOptions,
+		ValidExtensions: ValidLegacyFileExtensions,
+	}
 
-		f, err := os.Open(dir)
+	var files []string
+	err := filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
 		if err != nil {
-			return nil, err
+			return fmt.Errorf("error walking directory %q: %w", dir, err)
 		}
-		infos, err := f.Readdir(-1)
+
+		rel, err := filepath.Rel(v.providerDir, path)
 		if err != nil {
-			return nil, err
+			return err
 		}
-
-		for _, fi := range infos {
-			if !fi.IsDir() {
-				continue
+		if d.IsDir() {
+			match, err := doublestar.PathMatch(filepath.FromSlash(DocumentationDirGlobPattern), rel)
+			if err != nil {
+				return err
 			}
-
-			if !allowedDirs[fi.Name()] {
-				issues = append(issues, issue{
-					file:    filepath.Join(dir, fi.Name()),
-					message: fmt.Sprintf("directory %q is not allowed", fi.Name()),
-				})
+			if !match {
+				return nil // skip valid non-documentation directories
 			}
+
+			v.logger.infof("running invalid directories check on %s", rel)
+			result = errors.Join(result, check.InvalidDirectoriesCheck(rel))
+			return nil
 		}
-		return issues, nil
-	}
-}
+		match, err := doublestar.PathMatch(filepath.FromSlash(DocumentationGlobPattern), rel)
+		if err != nil {
+			return err
+		}
+		if !match {
+			return nil // skip non-documentation files
+		}
+
+		// Configure FrontMatterOptions based on file type
+		if d.Name() == "index.md" {
+			options.FrontMatter = LegacyIndexFrontMatterOptions
+		} else if _, relErr := filepath.Rel(rel, "guides"); relErr != nil {
+			options.FrontMatter = LegacyGuideFrontMatterOptions
+		} else {
+			options.FrontMatter = LegacyFrontMatterOptions
+		}
+		v.logger.infof("running file checks on %s", rel)
+		result = errors.Join(result, check.NewProviderFileCheck(options).Run(path))
 
-func checkAllowedFiles(dirs ...string) check {
-	allowedFiles := map[string]bool{}
-	for _, d := range dirs {
-		allowedFiles[d] = true
+		files = append(files, path)
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("error walking directory %q: %w", dir, err)
 	}
 
-	return func(dir string) ([]issue, error) {
-		issues := []issue{}
+	mismatchOpt := &check.FileMismatchOptions{
+		ProviderShortName: providerShortName(v.providerName),
+		Schema:            v.providerSchema,
+	}
 
-		f, err := os.Open(dir)
-		if err != nil {
-			return nil, err
-		}
-		infos, err := f.Readdir(-1)
-		if err != nil {
-			return nil, err
-		}
+	if dirExists(filepath.Join(dir, "d")) {
+		dataSourceFiles, _ := os.ReadDir(filepath.Join(dir, "d"))
+		mismatchOpt.DatasourceEntries = dataSourceFiles
+	}
+	if dirExists(filepath.Join(dir, "r")) {
+		resourceFiles, _ := os.ReadDir(filepath.Join(dir, "r"))
+		mismatchOpt.ResourceEntries = resourceFiles
+	}
+	if dirExists(filepath.Join(dir, "functions")) {
+		functionFiles, _ := os.ReadDir(filepath.Join(dir, "functions"))
+		mismatchOpt.FunctionEntries = functionFiles
+	}
 
-		for _, fi := range infos {
-			if fi.IsDir() {
-				continue
-			}
+	v.logger.infof("running file mismatch check")
+	if err := check.NewFileMismatchCheck(mismatchOpt).Run(); err != nil {
+		result = errors.Join(result, err)
+	}
 
-			if !allowedFiles[fi.Name()] {
-				issues = append(issues, issue{
-					file:    filepath.Join(dir, fi.Name()),
-					message: fmt.Sprintf("file %q is not allowed", fi.Name()),
-				})
-			}
-		}
+	return result
+}
 
-		return issues, nil
+func dirExists(name string) bool {
+	if file, err := os.Stat(name); err != nil {
+		return false
+	} else if !file.IsDir() {
+		return false
 	}
+
+	return true
 }
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/behaviors.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/behaviors.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/behaviors.go
rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/behaviors.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/render.go
similarity index 90%
rename from vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go
rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/render.go
index 90617d86..a459d0ba 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/render.go
@@ -57,11 +57,12 @@ var (
 )
 
 type nestedType struct {
-	anchorID string
-	path     []string
-	block    *tfjson.SchemaBlock
-	object   *cty.Type
-	attrs    *tfjson.SchemaNestedAttributeType
+	anchorID  string
+	pathTitle string
+	path      []string
+	block     *tfjson.SchemaBlock
+	object    *cty.Type
+	attrs     *tfjson.SchemaNestedAttributeType
 
 	group groupFilter
 }
@@ -87,6 +88,7 @@ func writeAttribute(w io.Writer, path []string, att *tfjson.SchemaAttribute, gro
 	}
 
 	anchorID := "nestedatt--" + strings.Join(path, "--")
+	pathTitle := strings.Join(path, ".")
 	nestedTypes := []nestedType{}
 	switch {
 	case att.AttributeNestedType != nil:
@@ -96,9 +98,10 @@ func writeAttribute(w io.Writer, path []string, att *tfjson.SchemaAttribute, gro
 		}
 
 		nestedTypes = append(nestedTypes, nestedType{
-			anchorID: anchorID,
-			path:     path,
-			attrs:    att.AttributeNestedType,
+			anchorID:  anchorID,
+			pathTitle: pathTitle,
+			path:      path,
+			attrs:     att.AttributeNestedType,
 
 			group: group,
 		})
@@ -109,9 +112,10 @@ func writeAttribute(w io.Writer, path []string, att *tfjson.SchemaAttribute, gro
 		}
 
 		nestedTypes = append(nestedTypes, nestedType{
-			anchorID: anchorID,
-			path:     path,
-			object:   &att.AttributeType,
+			anchorID:  anchorID,
+			pathTitle: pathTitle,
+			path:      path,
+			object:    &att.AttributeType,
 
 			group: group,
 		})
@@ -123,9 +127,10 @@ func writeAttribute(w io.Writer, path []string, att *tfjson.SchemaAttribute, gro
 		nt := att.AttributeType.ElementType()
 
 		nestedTypes = append(nestedTypes, nestedType{
-			anchorID: anchorID,
-			path:     path,
-			object:   &nt,
+			anchorID:  anchorID,
+			pathTitle: pathTitle,
+			path:      path,
+			object:    &nt,
 
 			group: group,
 		})
@@ -153,10 +158,12 @@ func writeBlockType(w io.Writer, path []string, block *tfjson.SchemaBlockType) (
 	}
 
 	anchorID := "nestedblock--" + strings.Join(path, "--")
+	pathTitle := strings.Join(path, ".")
 	nt := nestedType{
-		anchorID: anchorID,
-		path:     path,
-		block:    block.Block,
+		anchorID:  anchorID,
+		pathTitle: pathTitle,
+		path:      path,
+		block:     block.Block,
 	}
 
 	_, err = io.WriteString(w, " (see [below for nested schema](#"+anchorID+"))")
@@ -231,7 +238,7 @@ nameLoop:
 			//
 			// If a `.Description` is provided instead, the behaviour will be the
 			// same as for every other attribute.
-			if strings.ToLower(n) == "id" && childAtt.Description == "" {
+			if strings.ToLower(n) == "id" && len(parents) == 0 && childAtt.Description == "" {
 				if strings.Contains(gf.topLevelTitle, "Read-Only") {
 					childAtt.Description = "The ID of this resource."
 					groups[i] = append(groups[i], n)
@@ -344,7 +351,7 @@ func writeNestedTypes(w io.Writer, nestedTypes []nestedType) error {
 			return err
 		}
 
-		_, err = io.WriteString(w, "### Nested Schema for `"+strings.Join(nt.path, ".")+"`\n\n")
+		_, err = io.WriteString(w, "### Nested Schema for `"+nt.pathTitle+"`\n\n")
 		if err != nil {
 			return err
 		}
@@ -401,6 +408,7 @@ func writeObjectAttribute(w io.Writer, path []string, att cty.Type, group groupF
 	}
 
 	anchorID := "nestedobjatt--" + strings.Join(path, "--")
+	pathTitle := strings.Join(path, ".")
 	nestedTypes := []nestedType{}
 	switch {
 	case att.IsObjectType():
@@ -410,9 +418,10 @@ func writeObjectAttribute(w io.Writer, path []string, att cty.Type, group groupF
 		}
 
 		nestedTypes = append(nestedTypes, nestedType{
-			anchorID: anchorID,
-			path:     path,
-			object:   &att,
+			anchorID:  anchorID,
+			pathTitle: pathTitle,
+			path:      path,
+			object:    &att,
 
 			group: group,
 		})
@@ -424,9 +433,10 @@ func writeObjectAttribute(w io.Writer, path []string, att cty.Type, group groupF
 		nt := att.ElementType()
 
 		nestedTypes = append(nestedTypes, nestedType{
-			anchorID: anchorID,
-			path:     path,
-			object:   &nt,
+			anchorID:  anchorID,
+			pathTitle: pathTitle,
+			path:      path,
+			object:    &nt,
 
 			group: group,
 		})
@@ -456,7 +466,9 @@ func writeObjectChildren(w io.Writer, parents []string, ty cty.Type, group group
 	for _, name := range sortedNames {
 		att := atts[name]
-		path := append(parents, name)
+		path := make([]string, len(parents), len(parents)+1)
+		copy(path, parents)
+		path = append(path, name)
 
 		nt, err := writeObjectAttribute(w, path, att, group)
 		if err != nil {
@@ -512,7 +524,9 @@ func writeNestedAttributeChildren(w io.Writer, parents []string, nestedAttribute
 	for _, name := range names {
 		att := nestedAttributes.Attributes[name]
-		path := append(parents, name)
+		path := make([]string, len(parents), len(parents)+1)
+		copy(path, parents)
+		path = append(path, name)
 
 		nt, err := writeAttribute(w, path, att, group)
 		if err != nil {
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_attribute_description.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_attribute_description.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_attribute_description.go
rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_attribute_description.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_block_type_description.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_block_type_description.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_block_type_description.go
rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_block_type_description.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_nested_attribute_type_description.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_nested_attribute_type_description.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_nested_attribute_type_description.go
rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_nested_attribute_type_description.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_type.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_type.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_type.go
rename to vendor/github.com/hashicorp/terraform-plugin-docs/internal/schemamd/write_type.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/compare/doc.go b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/doc.go
new file mode 100644
index 00000000..feb4a4c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/doc.go
@@ -0,0 +1,5 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package compare contains the value comparer interface, and types implementing the value comparer interface.
+package compare
diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/compare/value_comparer.go b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/value_comparer.go
new file mode 100644
index 00000000..af635898
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/value_comparer.go
@@ -0,0 +1,13 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package compare
+
+// ValueComparer defines an interface that is implemented to run comparison logic on multiple values. Individual
+// implementations determine how the comparison is performed (e.g., values differ, values equal).
+type ValueComparer interface {
+	// CompareValues should assert the given known values against any expectations.
+	// Values are always ordered in the order they were added. Use the error
+	// return to signal unexpected values or implementation errors.
+	CompareValues(values ...any) error
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_differ.go b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_differ.go
new file mode 100644
index 00000000..24bd2ae2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_differ.go
@@ -0,0 +1,31 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package compare
+
+import (
+	"fmt"
+	"reflect"
+)
+
+var _ ValueComparer = valuesDiffer{}
+
+type valuesDiffer struct{}
+
+// CompareValues determines whether each value in the sequence of the supplied values
+// differs from the preceding value.
+func (v valuesDiffer) CompareValues(values ...any) error {
+	for i := 1; i < len(values); i++ {
+		if reflect.DeepEqual(values[i-1], values[i]) {
+			return fmt.Errorf("expected values to differ, but they are the same: %v == %v", values[i-1], values[i])
+		}
+	}
+
+	return nil
+}
+
+// ValuesDiffer returns a ValueComparer for asserting that each value in the sequence of
+// the values supplied to the CompareValues method differs from the preceding value.
+func ValuesDiffer() valuesDiffer {
+	return valuesDiffer{}
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_same.go b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_same.go
new file mode 100644
index 00000000..46ee13f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-testing/compare/values_same.go
@@ -0,0 +1,31 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package compare
+
+import (
+	"fmt"
+	"reflect"
+)
+
+var _ ValueComparer = valuesSame{}
+
+type valuesSame struct{}
+
+// CompareValues determines whether each value in the sequence of the supplied values
+// is the same as the preceding value.
+func (v valuesSame) CompareValues(values ...any) error {
+	for i := 1; i < len(values); i++ {
+		if !reflect.DeepEqual(values[i-1], values[i]) {
+			return fmt.Errorf("expected values to be the same, but they differ: %v != %v", values[i-1], values[i])
+		}
+	}
+
+	return nil
+}
+
+// ValuesSame returns a ValueComparer for asserting that each value in the sequence of
+// the values supplied to the CompareValues method is the same as the preceding value.
+func ValuesSame() valuesSame {
+	return valuesSame{}
+}
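For context, both comparers walk the supplied values pairwise, so a single matched (or mismatched) neighbor fails the whole sequence. A direct usage sketch of the public constructors defined above (illustrative only, not part of the patch; in tests these comparers are normally handed to state checks such as the statecheck.CompareValue helpers added elsewhere in this patch rather than called directly):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-testing/compare"
)

func main() {
	// Each value is compared against its predecessor, so one repeated
	// neighbor is enough to fail ValuesDiffer.
	fmt.Println(compare.ValuesDiffer().CompareValues("a", "b", "a")) // <nil>
	fmt.Println(compare.ValuesDiffer().CompareValues("a", "a"))      // error

	fmt.Println(compare.ValuesSame().CompareValues(1, 1, 1)) // <nil>
	fmt.Println(compare.ValuesSame().CompareValues(1, 2))    // error
}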
diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/additional_cli_options.go b/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/additional_cli_options.go
new file mode 100644
index 00000000..62578ede
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-testing/helper/resource/additional_cli_options.go
@@ -0,0 +1,26 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package resource
+
+// AdditionalCLIOptions allows an intentionally limited set of options to be passed
+// to the Terraform CLI when executing test steps.
+type AdditionalCLIOptions struct {
+	// Apply represents options to be passed to the `terraform apply` command.
+	Apply ApplyOptions
+
+	// Plan represents options to be passed to the `terraform plan` command.
+	Plan PlanOptions
+}
+
+// ApplyOptions represents options to be passed to the `terraform apply` command.
+type ApplyOptions struct {
+	// AllowDeferral will pass the experimental `-allow-deferral` flag to the apply command.
+	AllowDeferral bool
+}
+
+// PlanOptions represents options to be passed to the `terraform plan` command.
+type PlanOptions struct {
+	// AllowDeferral will pass the experimental `-allow-deferral` flag to the plan command.
+	AllowDeferral bool
+}
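For context, a sketch of opting a test case into these new experimental flags via the TestCase field added below in testing.go (illustrative only, not part of the patch; provider factories and version checks are omitted, and the resource type name is made up):

package example_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
)

func TestExampleThing_allowDeferral(t *testing.T) {
	resource.Test(t, resource.TestCase{
		// Provider wiring omitted for brevity. These options pass the
		// experimental -allow-deferral flag to both plan and apply.
		AdditionalCLIOptions: &resource.AdditionalCLIOptions{
			Apply: resource.ApplyOptions{AllowDeferral: true},
			Plan:  resource.PlanOptions{AllowDeferral: true},
		},
		Steps: []resource.TestStep{
			{
				Config: `resource "examplecloud_thing" "test" {}`,
			},
		},
	})
}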
}, wd, providers) if err != nil { @@ -168,7 +173,13 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint // Apply the diff, creating real resources err = runProviderCommand(ctx, t, func() error { - return wd.Apply(ctx) + var opts []tfexec.ApplyOption + + if c.AdditionalCLIOptions != nil && c.AdditionalCLIOptions.Apply.AllowDeferral { + opts = append(opts, tfexec.AllowDeferral(true)) + } + + return wd.Apply(ctx, opts...) }, wd, providers) if err != nil { if step.Destroy { @@ -238,6 +249,11 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint if step.Destroy { opts = append(opts, tfexec.Destroy(true)) } + + if c.AdditionalCLIOptions != nil && c.AdditionalCLIOptions.Plan.AllowDeferral { + opts = append(opts, tfexec.AllowDeferral(true)) + } + return wd.CreatePlan(ctx, opts...) }, wd, providers) if err != nil { @@ -302,6 +318,11 @@ func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugint opts = append(opts, tfexec.Refresh(false)) } } + + if c.AdditionalCLIOptions != nil && c.AdditionalCLIOptions.Plan.AllowDeferral { + opts = append(opts, tfexec.AllowDeferral(true)) + } + return wd.CreatePlan(ctx, opts...) }, wd, providers) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go index 2c8888d7..0cff3340 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/internal/plugintest/working_dir.go @@ -257,8 +257,9 @@ func (wd *WorkingDir) CreatePlan(ctx context.Context, opts ...tfexec.PlanOption) // successfully and the saved plan has not been cleared in the meantime then // this will apply the saved plan. Otherwise, it will implicitly create a new // plan and apply it. -func (wd *WorkingDir) Apply(ctx context.Context) error { +func (wd *WorkingDir) Apply(ctx context.Context, opts ...tfexec.ApplyOption) error { args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} + args = append(args, opts...) if wd.HasSavedPlan() { args = append(args, tfexec.DirOrPlan(PlanFileName)) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/float32.go b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/float32.go new file mode 100644 index 00000000..ee02fdcb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/float32.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "encoding/json" + "fmt" + "strconv" +) + +var _ Check = float32Exact{} + +type float32Exact struct { + value float32 +} + +// CheckValue determines whether the passed value is of type float32, and +// contains a matching float32 value. 
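+//
+// For example, a state value of json.Number("1.2") satisfies
+// Float32Exact(1.2), as both sides reduce to the same 32-bit float before
+// comparison.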
+func (v float32Exact) CheckValue(other any) error { + jsonNum, ok := other.(json.Number) + + if !ok { + return fmt.Errorf("expected json.Number value for Float32Exact check, got: %T", other) + } + + otherVal, err := strconv.ParseFloat(string(jsonNum), 32) + + if err != nil { + return fmt.Errorf("expected json.Number to be parseable as float32 value for Float32Exact check: %s", err) + } + + if float32(otherVal) != v.value { + return fmt.Errorf("expected value %s for Float32Exact check, got: %s", v.String(), strconv.FormatFloat(otherVal, 'f', -1, 32)) + } + + return nil +} + +// String returns the string representation of the float32 value. +func (v float32Exact) String() string { + return strconv.FormatFloat(float64(v.value), 'f', -1, 32) +} + +// Float32Exact returns a Check for asserting equality between the +// supplied float32 and the value passed to the CheckValue method. +func Float32Exact(value float32) float32Exact { + return float32Exact{ + value: value, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/int32.go b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/int32.go new file mode 100644 index 00000000..49dd30bb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/int32.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "encoding/json" + "fmt" + "strconv" +) + +var _ Check = int32Exact{} + +type int32Exact struct { + value int32 +} + +// CheckValue determines whether the passed value is of type int32, and +// contains a matching int32 value. +func (v int32Exact) CheckValue(other any) error { + jsonNum, ok := other.(json.Number) + + if !ok { + return fmt.Errorf("expected json.Number value for Int32Exact check, got: %T", other) + } + + otherVal, err := strconv.ParseInt(string(jsonNum), 10, 32) + + if err != nil { + return fmt.Errorf("expected json.Number to be parseable as int32 value for Int32Exact check: %s", err) + } + + if int32(otherVal) != v.value { + return fmt.Errorf("expected value %d for Int32Exact check, got: %d", v.value, otherVal) + } + + return nil +} + +// String returns the string representation of the int32 value. +func (v int32Exact) String() string { + return strconv.FormatInt(int64(v.value), 10) +} + +// Int32Exact returns a Check for asserting equality between the +// supplied int32 and the value passed to the CheckValue method. +func Int32Exact(value int32) int32Exact { + return int32Exact{ + value: value, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple.go b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple.go new file mode 100644 index 00000000..c034bba7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "fmt" +) + +var _ Check = tupleExact{} + +type tupleExact struct { + value []Check +} + +// CheckValue determines whether the passed value is of type []any, and +// contains matching slice entries in the same sequence. 
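+//
+// For example, a state value of []any{"a", json.Number("1")} satisfies
+// TupleExact([]Check{StringExact("a"), Int64Exact(1)}), while a value with a
+// different length or element order does not.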
+func (v tupleExact) CheckValue(other any) error { + otherVal, ok := other.([]any) + + if !ok { + return fmt.Errorf("expected []any value for TupleExact check, got: %T", other) + } + + if len(otherVal) != len(v.value) { + expectedElements := "elements" + actualElements := "elements" + + if len(v.value) == 1 { + expectedElements = "element" + } + + if len(otherVal) == 1 { + actualElements = "element" + } + + return fmt.Errorf("expected %d %s for TupleExact check, got %d %s", len(v.value), expectedElements, len(otherVal), actualElements) + } + + for i := 0; i < len(v.value); i++ { + if err := v.value[i].CheckValue(otherVal[i]); err != nil { + return fmt.Errorf("tuple element index %d: %s", i, err) + } + } + + return nil +} + +// String returns the string representation of the value. +func (v tupleExact) String() string { + var tupleVals []string + + for _, val := range v.value { + tupleVals = append(tupleVals, val.String()) + } + + return fmt.Sprintf("%s", tupleVals) +} + +// TupleExact returns a Check for asserting equality between the +// supplied []Check and the value passed to the CheckValue method. +// This is an order-dependent check. +func TupleExact(value []Check) tupleExact { + return tupleExact{ + value: value, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_partial.go b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_partial.go new file mode 100644 index 00000000..cfd73b2d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_partial.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +var _ Check = tuplePartial{} + +type tuplePartial struct { + value map[int]Check +} + +// CheckValue determines whether the passed value is of type []any, and +// contains matching slice entries in the same sequence. +func (v tuplePartial) CheckValue(other any) error { + otherVal, ok := other.([]any) + + if !ok { + return fmt.Errorf("expected []any value for TuplePartial check, got: %T", other) + } + + var keys []int + + for k := range v.value { + keys = append(keys, k) + } + + sort.SliceStable(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + for _, k := range keys { + if len(otherVal) <= k { + return fmt.Errorf("missing element index %d for TuplePartial check", k) + } + + if err := v.value[k].CheckValue(otherVal[k]); err != nil { + return fmt.Errorf("tuple element %d: %s", k, err) + } + } + + return nil +} + +// String returns the string representation of the value. +func (v tuplePartial) String() string { + var b bytes.Buffer + + b.WriteString("[") + + var keys []int + + var tupleVals []string + + for k := range v.value { + keys = append(keys, k) + } + + sort.SliceStable(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + for _, k := range keys { + tupleVals = append(tupleVals, fmt.Sprintf("%d:%s", k, v.value[k])) + } + + b.WriteString(strings.Join(tupleVals, " ")) + + b.WriteString("]") + + return b.String() +} + +// TuplePartial returns a Check for asserting partial equality between the +// supplied map[int]Check and the value passed to the CheckValue method. The +// map keys represent the zero-ordered element indices within the tuple that is +// being checked. Only the elements at the indices defined within the +// supplied map[int]Check are checked. 
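+//
+// For example, given a tuple value of []any{"a", json.Number("1"), true},
+// the following check passes while ignoring the middle element:
+//
+//	TuplePartial(map[int]Check{
+//		0: StringExact("a"),
+//		2: Bool(true),
+//	})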
+func TuplePartial(value map[int]Check) tuplePartial { + return tuplePartial{ + value: value, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_size.go b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_size.go new file mode 100644 index 00000000..a2c2dce3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/knownvalue/tuple_size.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "fmt" + "strconv" +) + +var _ Check = tupleSizeExact{} + +type tupleSizeExact struct { + size int +} + +// CheckValue verifies that the passed value is a tuple, map, object, +// or set, and contains a matching number of elements. +func (v tupleSizeExact) CheckValue(other any) error { + otherVal, ok := other.([]any) + + if !ok { + return fmt.Errorf("expected []any value for TupleSizeExact check, got: %T", other) + } + + if len(otherVal) != v.size { + expectedElements := "elements" + actualElements := "elements" + + if v.size == 1 { + expectedElements = "element" + } + + if len(otherVal) == 1 { + actualElements = "element" + } + + return fmt.Errorf("expected %d %s for TupleSizeExact check, got %d %s", v.size, expectedElements, len(otherVal), actualElements) + } + + return nil +} + +// String returns the string representation of the value. +func (v tupleSizeExact) String() string { + return strconv.FormatInt(int64(v.size), 10) +} + +// TupleSizeExact returns a Check for asserting that +// a tuple has size elements. +func TupleSizeExact(size int) tupleSizeExact { + return tupleSizeExact{ + size: size, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/deferred_reason.go b/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/deferred_reason.go new file mode 100644 index 00000000..4787a8c3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/deferred_reason.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +// DeferredReason is a string stored in the plan file which indicates why Terraform +// is deferring a change for a resource. +type DeferredReason string + +const ( + // DeferredReasonResourceConfigUnknown is used to indicate that the resource configuration + // is partially unknown and the real values need to be known before the change can be planned. + DeferredReasonResourceConfigUnknown DeferredReason = "resource_config_unknown" + + // DeferredReasonProviderConfigUnknown is used to indicate that the provider configuration + // is partially unknown and the real values need to be known before the change can be planned. + DeferredReasonProviderConfigUnknown DeferredReason = "provider_config_unknown" + + // DeferredReasonAbsentPrereq is used to indicate that a hard dependency has not been satisfied. + DeferredReasonAbsentPrereq DeferredReason = "absent_prereq" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_deferred_change.go b/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_deferred_change.go new file mode 100644 index 00000000..14310ca3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_deferred_change.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "fmt" +) + +var _ PlanCheck = expectDeferredChange{} + +type expectDeferredChange struct { + resourceAddress string + reason DeferredReason +} + +// CheckPlan implements the plan check logic. +func (e expectDeferredChange) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + foundResource := false + + for _, dc := range req.Plan.DeferredChanges { + if dc.ResourceChange == nil || e.resourceAddress != dc.ResourceChange.Address { + continue + } + + if e.reason != DeferredReason(dc.Reason) { + resp.Error = fmt.Errorf("'%s' - expected %q, got deferred reason: %q", dc.ResourceChange.Address, e.reason, dc.Reason) + return + } + + foundResource = true + break + } + + if !foundResource { + resp.Error = fmt.Errorf("%s - No deferred changes found for resource", e.resourceAddress) + return + } +} + +// ExpectDeferredChange returns a plan check that asserts that a given resource will have a +// deferred change in the plan with the given reason. +func ExpectDeferredChange(resourceAddress string, reason DeferredReason) PlanCheck { + return expectDeferredChange{ + resourceAddress: resourceAddress, + reason: reason, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_no_deferred_changes.go b/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_no_deferred_changes.go new file mode 100644 index 00000000..726ea680 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/plancheck/expect_no_deferred_changes.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plancheck + +import ( + "context" + "errors" + "fmt" +) + +var _ PlanCheck = expectNoDeferredChanges{} + +type expectNoDeferredChanges struct{} + +// CheckPlan implements the plan check logic. +func (e expectNoDeferredChanges) CheckPlan(ctx context.Context, req CheckPlanRequest, resp *CheckPlanResponse) { + if len(req.Plan.DeferredChanges) == 0 { + return + } + + var result []error + for _, deferred := range req.Plan.DeferredChanges { + resourceAddress := "unknown" + if deferred.ResourceChange != nil { + resourceAddress = deferred.ResourceChange.Address + } + + result = append(result, fmt.Errorf("expected no deferred changes, but resource %q is deferred with reason: %q", resourceAddress, deferred.Reason)) + } + + resp.Error = errors.Join(result...) + if resp.Error != nil { + return + } + + if req.Plan.Complete == nil { + resp.Error = errors.New("expected plan to be marked as complete, but complete field was not set in plan (nil). This indicates that the plan was created with a version of Terraform older than 1.8, which does not support the complete field.") + return + } + + if !*req.Plan.Complete { + resp.Error = errors.New("expected plan to be marked as complete, but complete was \"false\", indicating that at least one more plan/apply round is needed to converge.") + return + } +} + +// ExpectNoDeferredChanges returns a plan check that asserts that there are no deferred changes +// for any resources in the plan. 
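+//
+// A usage sketch, assuming the enclosing TestStep wiring from the resource
+// package:
+//
+//	ConfigPlanChecks: resource.ConfigPlanChecks{
+//		PreApply: []plancheck.PlanCheck{
+//			plancheck.ExpectNoDeferredChanges(),
+//		},
+//	},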
+func ExpectNoDeferredChanges() PlanCheck { + return expectNoDeferredChanges{} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value.go b/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value.go new file mode 100644 index 00000000..68a6ef9d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import ( + "context" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +// Resource State Check +var _ StateCheck = &compareValue{} + +type compareValue struct { + resourceAddresses []string + attributePaths []tfjsonpath.Path + stateValues []any + comparer compare.ValueComparer +} + +func (e *compareValue) AddStateValue(resourceAddress string, attributePath tfjsonpath.Path) StateCheck { + e.resourceAddresses = append(e.resourceAddresses, resourceAddress) + e.attributePaths = append(e.attributePaths, attributePath) + + return e +} + +// CheckState implements the state check logic. +func (e *compareValue) CheckState(ctx context.Context, req CheckStateRequest, resp *CheckStateResponse) { + var resource *tfjson.StateResource + + if req.State == nil { + resp.Error = fmt.Errorf("state is nil") + + return + } + + if req.State.Values == nil { + resp.Error = fmt.Errorf("state does not contain any state values") + + return + } + + if req.State.Values.RootModule == nil { + resp.Error = fmt.Errorf("state does not contain a root module") + + return + } + + // All calls to AddStateValue occur before any TestStep is run, populating the resourceAddresses + // and attributePaths slices. The stateValues slice is populated during execution of each TestStep. + // Each call to CheckState happens sequentially during each TestStep. + // The currentIndex is reflective of the current state value being checked. + currentIndex := len(e.stateValues) + + if len(e.resourceAddresses) <= currentIndex { + resp.Error = fmt.Errorf("resource addresses index out of bounds: %d", currentIndex) + + return + } + + resourceAddress := e.resourceAddresses[currentIndex] + + for _, r := range req.State.Values.RootModule.Resources { + if resourceAddress == r.Address { + resource = r + + break + } + } + + if resource == nil { + resp.Error = fmt.Errorf("%s - Resource not found in state", resourceAddress) + + return + } + + if len(e.attributePaths) <= currentIndex { + resp.Error = fmt.Errorf("attribute paths index out of bounds: %d", currentIndex) + + return + } + + attributePath := e.attributePaths[currentIndex] + + result, err := tfjsonpath.Traverse(resource.AttributeValues, attributePath) + + if err != nil { + resp.Error = err + + return + } + + e.stateValues = append(e.stateValues, result) + + err = e.comparer.CompareValues(e.stateValues...) + + if err != nil { + resp.Error = err + } +} + +// CompareValue returns a state check that compares values retrieved from state using the +// supplied value comparer. 
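+//
+// A usage sketch; the resource address and attribute path are illustrative.
+// The same returned check is registered in successive TestSteps so that each
+// step appends one state value before the comparison runs:
+//
+//	compareValuesDiffer := statecheck.CompareValue(compare.ValuesDiffer())
+//
+//	// Registered within each TestStep's ConfigStateChecks:
+//	compareValuesDiffer.AddStateValue("random_string.one", tfjsonpath.New("result"))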
+func CompareValue(comparer compare.ValueComparer) *compareValue { + return &compareValue{ + comparer: comparer, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_collection.go b/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_collection.go new file mode 100644 index 00000000..7a06c601 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_collection.go @@ -0,0 +1,223 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import ( + "context" + "errors" + "fmt" + "sort" + + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +// Resource State Check +var _ StateCheck = &compareValueCollection{} + +type compareValueCollection struct { + resourceAddressOne string + collectionPath []tfjsonpath.Path + resourceAddressTwo string + attributePath tfjsonpath.Path + comparer compare.ValueComparer +} + +func walkCollectionPath(obj any, paths []tfjsonpath.Path, results []any) ([]any, error) { + switch t := obj.(type) { + case []any: + for _, v := range t { + if len(paths) == 0 { + results = append(results, v) + continue + } + + x, err := tfjsonpath.Traverse(v, paths[0]) + + if err != nil { + return results, err + } + + results, err = walkCollectionPath(x, paths[1:], results) + + if err != nil { + return results, err + } + } + case map[string]any: + keys := make([]string, 0, len(t)) + + for k := range t { + keys = append(keys, k) + } + + sort.Strings(keys) + + for _, key := range keys { + if len(paths) == 0 { + results = append(results, t[key]) + continue + } + + x, err := tfjsonpath.Traverse(t, paths[0]) + + if err != nil { + return results, err + } + + results, err = walkCollectionPath(x, paths[1:], results) + + if err != nil { + return results, err + } + } + default: + results = append(results, obj) + } + + return results, nil +} + +// CheckState implements the state check logic. +func (e *compareValueCollection) CheckState(ctx context.Context, req CheckStateRequest, resp *CheckStateResponse) { + var resourceOne *tfjson.StateResource + var resourceTwo *tfjson.StateResource + + if req.State == nil { + resp.Error = fmt.Errorf("state is nil") + + return + } + + if req.State.Values == nil { + resp.Error = fmt.Errorf("state does not contain any state values") + + return + } + + if req.State.Values.RootModule == nil { + resp.Error = fmt.Errorf("state does not contain a root module") + + return + } + + for _, r := range req.State.Values.RootModule.Resources { + if e.resourceAddressOne == r.Address { + resourceOne = r + + break + } + } + + if resourceOne == nil { + resp.Error = fmt.Errorf("%s - Resource not found in state", e.resourceAddressOne) + + return + } + + if len(e.collectionPath) == 0 { + resp.Error = fmt.Errorf("%s - No collection path was provided", e.resourceAddressOne) + + return + } + + resultOne, err := tfjsonpath.Traverse(resourceOne.AttributeValues, e.collectionPath[0]) + + if err != nil { + resp.Error = err + + return + } + + // Verify resultOne is a collection. + switch t := resultOne.(type) { + case []any, map[string]any: + // Collection found. 
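+ 		// Lists, sets, and tuples decode from state JSON as []any, while maps
+ 		// and objects decode as map[string]any; walkCollectionPath handles
+ 		// either shape when traversing the remaining path elements.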
+ default: + var pathStr string + + for _, v := range e.collectionPath { + pathStr += fmt.Sprintf(".%s", v.String()) + } + + resp.Error = fmt.Errorf("%s%s is not a collection type: %T", e.resourceAddressOne, pathStr, t) + + return + } + + var results []any + + results, err = walkCollectionPath(resultOne, e.collectionPath[1:], results) + + if err != nil { + resp.Error = err + + return + } + + for _, r := range req.State.Values.RootModule.Resources { + if e.resourceAddressTwo == r.Address { + resourceTwo = r + + break + } + } + + if resourceTwo == nil { + resp.Error = fmt.Errorf("%s - Resource not found in state", e.resourceAddressTwo) + + return + } + + resultTwo, err := tfjsonpath.Traverse(resourceTwo.AttributeValues, e.attributePath) + + if err != nil { + resp.Error = err + + return + } + + var errs []error + + for _, v := range results { + switch resultTwo.(type) { + case []any: + errs = append(errs, e.comparer.CompareValues([]any{v}, resultTwo)) + default: + errs = append(errs, e.comparer.CompareValues(v, resultTwo)) + } + } + + for _, err = range errs { + if err == nil { + return + } + } + + errMsgs := map[string]struct{}{} + + for _, err = range errs { + if _, ok := errMsgs[err.Error()]; ok { + continue + } + + resp.Error = errors.Join(resp.Error, err) + + errMsgs[err.Error()] = struct{}{} + } +} + +// CompareValueCollection returns a state check that iterates over each element in a collection and compares the value of each element +// with the value of an attribute using the given value comparer. +func CompareValueCollection(resourceAddressOne string, collectionPath []tfjsonpath.Path, resourceAddressTwo string, attributePath tfjsonpath.Path, comparer compare.ValueComparer) StateCheck { + return &compareValueCollection{ + resourceAddressOne: resourceAddressOne, + collectionPath: collectionPath, + resourceAddressTwo: resourceAddressTwo, + attributePath: attributePath, + comparer: comparer, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_pairs.go b/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_pairs.go new file mode 100644 index 00000000..8db67c56 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/statecheck/compare_value_pairs.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import ( + "context" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +// Resource State Check +var _ StateCheck = &compareValuePairs{} + +type compareValuePairs struct { + resourceAddressOne string + attributePathOne tfjsonpath.Path + resourceAddressTwo string + attributePathTwo tfjsonpath.Path + comparer compare.ValueComparer +} + +// CheckState implements the state check logic. 
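+// Both resource addresses are resolved against the root module only;
+// resources in nested modules are not matched.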
+func (e *compareValuePairs) CheckState(ctx context.Context, req CheckStateRequest, resp *CheckStateResponse) { + var resourceOne *tfjson.StateResource + var resourceTwo *tfjson.StateResource + + if req.State == nil { + resp.Error = fmt.Errorf("state is nil") + + return + } + + if req.State.Values == nil { + resp.Error = fmt.Errorf("state does not contain any state values") + + return + } + + if req.State.Values.RootModule == nil { + resp.Error = fmt.Errorf("state does not contain a root module") + + return + } + + for _, r := range req.State.Values.RootModule.Resources { + if e.resourceAddressOne == r.Address { + resourceOne = r + + break + } + } + + if resourceOne == nil { + resp.Error = fmt.Errorf("%s - Resource not found in state", e.resourceAddressOne) + + return + } + + resultOne, err := tfjsonpath.Traverse(resourceOne.AttributeValues, e.attributePathOne) + + if err != nil { + resp.Error = err + + return + } + + for _, r := range req.State.Values.RootModule.Resources { + if e.resourceAddressTwo == r.Address { + resourceTwo = r + + break + } + } + + if resourceTwo == nil { + resp.Error = fmt.Errorf("%s - Resource not found in state", e.resourceAddressTwo) + + return + } + + resultTwo, err := tfjsonpath.Traverse(resourceTwo.AttributeValues, e.attributePathTwo) + + if err != nil { + resp.Error = err + + return + } + + err = e.comparer.CompareValues(resultOne, resultTwo) + + if err != nil { + resp.Error = err + } +} + +// CompareValuePairs returns a state check that compares the value in state for the first given resource address and +// path with the value in state for the second given resource address and path using the supplied value comparer. +func CompareValuePairs(resourceAddressOne string, attributePathOne tfjsonpath.Path, resourceAddressTwo string, attributePathTwo tfjsonpath.Path, comparer compare.ValueComparer) StateCheck { + return &compareValuePairs{ + resourceAddressOne: resourceAddressOne, + attributePathOne: attributePathOne, + resourceAddressTwo: resourceAddressTwo, + attributePathTwo: attributePathTwo, + comparer: comparer, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go index 4734bcf6..bea04c67 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_above.go @@ -11,9 +11,19 @@ import ( ) // RequireAbove will fail the test if the Terraform CLI -// version is below the given version. For example, if given -// version.Must(version.NewVersion("0.15.0")), then 0.14.x or -// any other prior minor versions will fail the test. +// version is exclusively below the given version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.7.x or +// any other prior versions will fail the test. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will run, not fail, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing. If failing prereleases of the same patch release is desired, give a +// higher prerelease version. For example, if given +// version.Must(version.NewVersion("1.8.0-rc2")), then 1.8.0-rc1 will fail the +// test. 
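+//
+// A usage sketch via the TestCase field from the resource package:
+//
+//	TerraformVersionChecks: []tfversion.TerraformVersionCheck{
+//		tfversion.RequireAbove(tfversion.Version1_8_0),
+//	},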
func RequireAbove(minimumVersion *version.Version) TerraformVersionCheck { return requireAboveCheck{ minimumVersion: minimumVersion, @@ -27,8 +37,17 @@ type requireAboveCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (r requireAboveCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var terraformVersion *version.Version - if req.TerraformVersion.LessThan(r.minimumVersion) { + // If given a prerelease version, check the Terraform CLI version directly, + // otherwise use the core version so that prereleases are treated as equal. + if r.minimumVersion.Prerelease() != "" { + terraformVersion = req.TerraformVersion + } else { + terraformVersion = req.TerraformVersion.Core() + } + + if terraformVersion.LessThan(r.minimumVersion) { resp.Error = fmt.Errorf("expected Terraform CLI version above %s but detected version is %s", r.minimumVersion, req.TerraformVersion) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go index 99efa534..1b1f2cfb 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_below.go @@ -11,9 +11,19 @@ import ( ) // RequireBelow will fail the test if the Terraform CLI -// version is above the given version. For example, if given -// version.Must(version.NewVersion("0.15.0")), then versions 0.15.x and +// version is inclusively above the given version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then versions 1.8.x and // above will fail the test. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will fail, not run, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing purposes. If failing prereleases of the same patch release is +// desired, give a lower prerelease version. For example, if given +// version.Must(version.NewVersion("1.8.0-rc1")), then 1.8.0-rc2 will fail the +// test. func RequireBelow(maximumVersion *version.Version) TerraformVersionCheck { return requireBelowCheck{ maximumVersion: maximumVersion, @@ -27,8 +37,17 @@ type requireBelowCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s requireBelowCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var terraformVersion *version.Version - if req.TerraformVersion.GreaterThan(s.maximumVersion) { + // If given a prerelease version, check the Terraform CLI version directly, + // otherwise use the core version so that prereleases are treated as equal. 
+ if s.maximumVersion.Prerelease() != "" { + terraformVersion = req.TerraformVersion + } else { + terraformVersion = req.TerraformVersion.Core() + } + + if terraformVersion.GreaterThanOrEqual(s.maximumVersion) { resp.Error = fmt.Errorf("expected Terraform CLI version below %s but detected version is %s", s.maximumVersion, req.TerraformVersion) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go index b9929792..ac015077 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_between.go @@ -12,9 +12,19 @@ import ( // RequireBetween will fail the test if the Terraform CLI // version is outside the given minimum (exclusive) and maximum (inclusive). -// For example, if given a minimum version of version.Must(version.NewVersion("0.15.0")) -// and a maximum version of version.Must(version.NewVersion("1.0.0")), then 0.15.x or -// any other prior versions and versions greater than 1.0.0 will fail the test. +// For example, if given a minimum version of version.Must(version.NewVersion("1.7.0")) +// and a maximum version of version.Must(version.NewVersion("1.8.0")), then 1.6.x or +// any other prior versions and versions greater than or equal to 1.8.0 will fail the test. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given a minimum version of +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will run, not fail, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing purposes. If failing prereleases of the same patch release is +// desired, give a higher prerelease version. For example, if given a minimum +// version of version.Must(version.NewVersion("1.8.0-rc2")), then 1.8.0-rc1 will +// fail the test. func RequireBetween(minimumVersion, maximumVersion *version.Version) TerraformVersionCheck { return requireBetweenCheck{ minimumVersion: minimumVersion, @@ -30,8 +40,27 @@ type requireBetweenCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s requireBetweenCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var maxTerraformVersion, minTerraformVersion *version.Version - if req.TerraformVersion.LessThan(s.minimumVersion) || req.TerraformVersion.GreaterThanOrEqual(s.maximumVersion) { + // If given a prerelease maximum version, check the Terraform CLI version + // directly, otherwise use the core version so that prereleases are treated + // as equal. + if s.maximumVersion.Prerelease() != "" { + maxTerraformVersion = req.TerraformVersion + } else { + maxTerraformVersion = req.TerraformVersion.Core() + } + + // If given a prerelease minimum version, check the Terraform CLI version + // directly, otherwise use the core version so that prereleases are treated + // as equal. 
+ if s.minimumVersion.Prerelease() != "" { + minTerraformVersion = req.TerraformVersion + } else { + minTerraformVersion = req.TerraformVersion.Core() + } + + if minTerraformVersion.LessThan(s.minimumVersion) || maxTerraformVersion.GreaterThanOrEqual(s.maximumVersion) { resp.Error = fmt.Errorf("expected Terraform CLI version between %s and %s but detected version is %s", s.minimumVersion, s.maximumVersion, req.TerraformVersion) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go index 18a9b68d..2addb281 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/require_not.go @@ -12,6 +12,16 @@ import ( // RequireNot will fail the test if the Terraform CLI // version matches the given version. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will fail, not run, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing purposes. If running prereleases of the same patch release is +// desired, give a different prerelease version. For example, if given +// version.Must(version.NewVersion("1.8.0-rc2")), then 1.8.0-rc1 will +// run the test. func RequireNot(version *version.Version) TerraformVersionCheck { return requireNotCheck{ version: version, @@ -25,8 +35,17 @@ type requireNotCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s requireNotCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var terraformVersion *version.Version - if req.TerraformVersion.Equal(s.version) { + // If given a prerelease version, check the Terraform CLI version directly, + // otherwise use the core version so that prereleases are treated as equal. + if s.version.Prerelease() != "" { + terraformVersion = req.TerraformVersion + } else { + terraformVersion = req.TerraformVersion.Core() + } + + if terraformVersion.Equal(s.version) { resp.Error = fmt.Errorf("unexpected Terraform CLI version: %s", s.version) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go index ffc69b85..b56918c6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_above.go @@ -11,9 +11,19 @@ import ( ) // SkipAbove will skip (pass) the test if the Terraform CLI -// version is below the given version. For example, if given -// version.Must(version.NewVersion("0.15.0")), then 0.14.x or -// any other prior minor versions will skip the test. +// version is exclusively above the given version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.x or +// any other later versions will skip the test. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will run, not skip, +// the test. 
Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing. If skipping prereleases of the same patch release is desired, give a +// lower prerelease version. For example, if given +// version.Must(version.NewVersion("1.8.0-rc1")), then 1.8.0-rc2 will skip the +// test. func SkipAbove(maximumVersion *version.Version) TerraformVersionCheck { return skipAboveCheck{ maximumVersion: maximumVersion, @@ -27,8 +37,17 @@ type skipAboveCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s skipAboveCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var terraformVersion *version.Version - if req.TerraformVersion.GreaterThan(s.maximumVersion) { + // If given a prerelease version, check the Terraform CLI version directly, + // otherwise use the core version so that prereleases are treated as equal. + if s.maximumVersion.Prerelease() != "" { + terraformVersion = req.TerraformVersion + } else { + terraformVersion = req.TerraformVersion.Core() + } + + if terraformVersion.GreaterThan(s.maximumVersion) { resp.Skip = fmt.Sprintf("Terraform CLI version %s is above maximum version %s: skipping test", req.TerraformVersion, s.maximumVersion) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go index 0b3dffdd..15cb8a49 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_below.go @@ -11,9 +11,19 @@ import ( ) // SkipBelow will skip (pass) the test if the Terraform CLI -// version is below the given version. For example, if given -// version.Must(version.NewVersion("0.15.0")), then 0.14.x or -// any other prior minor versions will skip the test. +// version is exclusively below the given version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.7.x or +// any other prior versions will skip the test. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will run, not skip, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as important for testing to +// run. If skipping prereleases of the same patch release is desired, give a +// higher prerelease version. For example, if given +// version.Must(version.NewVersion("1.8.0-rc2")), then 1.8.0-rc1 will skip the +// test. func SkipBelow(minimumVersion *version.Version) TerraformVersionCheck { return skipBelowCheck{ minimumVersion: minimumVersion, @@ -27,8 +37,17 @@ type skipBelowCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s skipBelowCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var terraformVersion *version.Version - if req.TerraformVersion.LessThan(s.minimumVersion) { + // If given a prerelease version, check the Terraform CLI version directly, + // otherwise use the core version so that prereleases are treated as equal. 
+ if s.minimumVersion.Prerelease() != "" { + terraformVersion = req.TerraformVersion + } else { + terraformVersion = req.TerraformVersion.Core() + } + + if terraformVersion.LessThan(s.minimumVersion) { resp.Skip = fmt.Sprintf("Terraform CLI version %s is below minimum version %s: skipping test", req.TerraformVersion, s.minimumVersion) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go index fb6e9410..555ff79c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_between.go @@ -12,9 +12,19 @@ import ( // SkipBetween will skip the test if the Terraform CLI // version is between the given minimum (inclusive) and maximum (exclusive). -// For example, if given a minimum version of version.Must(version.NewVersion("0.15.0")) -// and a maximum version of version.Must(version.NewVersion("0.16.0")), then versions 0.15.x +// For example, if given a minimum version of version.Must(version.NewVersion("1.7.0")) +// and a maximum version of version.Must(version.NewVersion("1.8.0")), then versions 1.7.x // will skip the test. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given a minimum version of +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will skip, not run, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing purposes. If running prereleases of the same patch release is +// desired, give a higher prerelease version. For example, if given a minimum +// version of version.Must(version.NewVersion("1.8.0-rc2")), then 1.8.0-rc1 will +// run the test. func SkipBetween(minimumVersion, maximumVersion *version.Version) TerraformVersionCheck { return skipBetweenCheck{ minimumVersion: minimumVersion, @@ -30,8 +40,27 @@ type skipBetweenCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s skipBetweenCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var maxTerraformVersion, minTerraformVersion *version.Version - if req.TerraformVersion.GreaterThanOrEqual(s.minimumVersion) && req.TerraformVersion.LessThan(s.maximumVersion) { + // If given a prerelease maximum version, check the Terraform CLI version + // directly, otherwise use the core version so that prereleases are treated + // as equal. + if s.maximumVersion.Prerelease() != "" { + maxTerraformVersion = req.TerraformVersion + } else { + maxTerraformVersion = req.TerraformVersion.Core() + } + + // If given a prerelease minimum version, check the Terraform CLI version + // directly, otherwise use the core version so that prereleases are treated + // as equal. 
+ if s.minimumVersion.Prerelease() != "" { + minTerraformVersion = req.TerraformVersion + } else { + minTerraformVersion = req.TerraformVersion.Core() + } + + if minTerraformVersion.GreaterThanOrEqual(s.minimumVersion) && maxTerraformVersion.LessThan(s.maximumVersion) { resp.Skip = fmt.Sprintf("Terraform CLI version %s is between %s and %s: skipping test.", req.TerraformVersion, s.minimumVersion, s.maximumVersion) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go index 6ece5e05..e5b7d96f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if.go @@ -12,6 +12,16 @@ import ( // SkipIf will skip (pass) the test if the Terraform CLI // version matches the given version. +// +// Prereleases of Terraform CLI (whether alpha, beta, or rc) are considered +// equal to a given patch version. For example, if given +// version.Must(version.NewVersion("1.8.0")), then 1.8.0-rc1 will skip, not run, +// the test. Terraform prereleases are considered as potential candidates for +// the upcoming version and therefore are treated as semantically equal for +// testing purposes. If running prereleases of the same patch release is +// desired, give a different prerelease version. For example, if given +// version.Must(version.NewVersion("1.8.0-rc2")), then 1.8.0-rc1 will +// run the test. func SkipIf(version *version.Version) TerraformVersionCheck { return skipIfCheck{ version: version, @@ -25,8 +35,17 @@ type skipIfCheck struct { // CheckTerraformVersion satisfies the TerraformVersionCheck interface. func (s skipIfCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + var terraformVersion *version.Version - if req.TerraformVersion.Equal(s.version) { + // If given a prerelease version, check the Terraform CLI version directly, + // otherwise use the core version so that prereleases are treated as equal. + if s.version.Prerelease() != "" { + terraformVersion = req.TerraformVersion + } else { + terraformVersion = req.TerraformVersion.Core() + } + + if terraformVersion.Equal(s.version) { resp.Skip = fmt.Sprintf("Terraform CLI version is %s: skipping test.", s.version) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if_not_prerelease.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if_not_prerelease.go new file mode 100644 index 00000000..c49400b2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/skip_if_not_prerelease.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfversion + +import ( + "context" + "fmt" +) + +// SkipIfNotPrerelease will skip (pass) the test if the Terraform CLI +// version does not include prerelease information. This will include builds +// of Terraform that are from source. (e.g. 1.8.0-dev) +func SkipIfNotPrerelease() TerraformVersionCheck { + return skipIfNotPrereleaseCheck{} +} + +// skipIfNotPrereleaseCheck implements the TerraformVersionCheck interface +type skipIfNotPrereleaseCheck struct{} + +// CheckTerraformVersion satisfies the TerraformVersionCheck interface. 
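+// Release builds such as 1.8.0 report an empty Prerelease(), so the test is
+// skipped; prerelease builds such as 1.8.0-rc1 or 1.9.0-dev run it.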
+func (s skipIfNotPrereleaseCheck) CheckTerraformVersion(ctx context.Context, req CheckTerraformVersionRequest, resp *CheckTerraformVersionResponse) { + if req.TerraformVersion.Prerelease() != "" { + return + } + + resp.Skip = fmt.Sprintf("Terraform CLI version %s is not a prerelease build: skipping test.", req.TerraformVersion) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go index 21f0727a..4dcf5d14 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go +++ b/vendor/github.com/hashicorp/terraform-plugin-testing/tfversion/versions.go @@ -35,4 +35,5 @@ var ( Version1_6_0 *version.Version = version.Must(version.NewVersion("1.6.0")) Version1_7_0 *version.Version = version.Must(version.NewVersion("1.7.0")) Version1_8_0 *version.Version = version.Must(version.NewVersion("1.8.0")) + Version1_9_0 *version.Version = version.Must(version.NewVersion("1.9.0")) ) diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore deleted file mode 100644 index 75623dcc..00000000 --- a/vendor/github.com/russross/blackfriday/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml deleted file mode 100644 index a49fff15..00000000 --- a/vendor/github.com/russross/blackfriday/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -go: - - "1.9.x" - - "1.10.x" - - "1.11.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt deleted file mode 100644 index 7fbb253a..00000000 --- a/vendor/github.com/russross/blackfriday/LICENSE.txt +++ /dev/null @@ -1,28 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -Copyright © 2011 Russ Ross -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with - the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md deleted file mode 100644 index 997ef5d4..00000000 --- a/vendor/github.com/russross/blackfriday/README.md +++ /dev/null @@ -1,364 +0,0 @@ -Blackfriday -[![Build Status][BuildV2SVG]][BuildV2URL] -[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. - -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with modern Go releases in module mode. -With Go installed: - - go get github.com/russross/blackfriday - -will resolve and add the package to the current development module, -then build and install it. Alternatively, you can achieve the same -if you import it in a package: - - import "github.com/russross/blackfriday" - -and `go get` without parameters. - -Old versions of Go and legacy GOPATH mode might work, -but no effort is made to keep them working. - - -Versions --------- - -Currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the -documentation is available at -https://pkg.go.dev/github.com/russross/blackfriday/v2. - -It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. - -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford modifying your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -If you are still interested in the legacy `v1`, you can import it from -`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://pkg.go.dev/github.com/russross/blackfriday. - - -Usage ------ - -### v1 - -For basic usage, it is as simple as getting your input into a byte -slice and calling: - -```go -output := blackfriday.MarkdownBasic(input) -``` - -This renders it with no extensions enabled. 
To get a more useful -feature set, use this instead: - -```go -output := blackfriday.MarkdownCommon(input) -``` - -### v2 - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options, v1 - -If you want to customize the set of options, first get a renderer -(currently only the HTML output engine), then use it to -call the more general `Markdown` function. For examples, see the -implementations of `MarkdownBasic` and `MarkdownCommon` in -`markdown.go`. - -### Custom options, v2 - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -### `blackfriday-tool` - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - -### Sanitized anchor names - -Blackfriday includes an algorithm for creating sanitized anchor names -corresponding to a given input text. This algorithm is used to create -anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The -algorithm has a specification, so that other packages can create -compatible anchor names and links to those anchors. - -The specification is located at https://pkg.go.dev/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. - -[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to -create compatible links to the anchor names generated by blackfriday. -This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients -that want a small package and don't need full functionality of blackfriday. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. 
Without `--tidy`, the differences are
-  mostly in whitespace and entity escaping, where blackfriday is
-  more consistent and cleaner.
-
-* **Common extensions**, including table support, fenced code
-  blocks, autolinks, strikethroughs, non-strict emphasis, etc.
-
-* **Safety**. Blackfriday is paranoid when parsing, making it safe
-  to feed untrusted user input without fear of bad things
-  happening. The test suite stress tests this and there are no
-  known inputs that make it crash. If you find one, please let me
-  know and send me the input that does it.
-
-  NOTE: "safety" in this context means *runtime safety only*. In order to
-  protect yourself against JavaScript injection in untrusted content, see
-  [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
-
-* **Fast processing**. It is fast enough to render on-demand in
-  most web applications without having to cache the output.
-
-* **Thread safety**. You can run multiple parsers in different
-  goroutines without ill effect. There is no dependence on global
-  shared state.
-
-* **Minimal dependencies**. Blackfriday only depends on standard
-  library packages in Go. The source code is pretty
-  self-contained, so it is easy to add to any project, including
-  Google App Engine projects.
-
-* **Standards compliant**. Output successfully validates using the
-  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
-
-
-Extensions
-----------
-
-In addition to the standard markdown syntax, this package
-implements the following extensions:
-
-* **Intra-word emphasis suppression**. The `_` character is
-  commonly used inside words when discussing code, so having
-  markdown interpret it as an emphasis command is usually the
-  wrong thing. Blackfriday lets you treat all emphasis markers as
-  normal characters when they occur inside a word.
-
-* **Tables**. Tables can be created by drawing them in the input
-  using a simple syntax:
-
-  ```
-  Name    | Age
-  --------|------
-  Bob     | 27
-  Alice   | 23
-  ```
-
-* **Fenced code blocks**. In addition to the normal 4-space
-  indentation to mark code blocks, you can explicitly mark them
-  and supply a language (to make syntax highlighting simple). Just
-  mark it like this:
-
-  ```go
-  func getTrue() bool {
-      return true
-  }
-  ```
-
-  You can use 3 or more backticks to mark the beginning of the
-  block, and the same number to mark the end of the block.
-
-  To preserve classes of fenced code blocks while using the bluemonday
-  HTML sanitizer, use the following policy:
-
-  ```go
-  p := bluemonday.UGCPolicy()
-  p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
-  html := p.SanitizeBytes(unsafe)
-  ```
-
-* **Definition lists**. A simple definition list is made of a single-line
-  term followed by a colon and the definition for that term.
-
-      Cat
-      : Fluffy animal everyone likes
-
-      Internet
-      : Vector of transmission for pictures of cats
-
-  Terms must be separated from the previous definition by a blank line.
-
-* **Footnotes**. A marker in the text that will become a superscript number;
-  a footnote definition that will be placed in a list of footnotes at the
-  end of the document. A footnote looks like this:
-
-      This is a footnote.[^1]
-
-      [^1]: the footnote text.
-
-* **Autolinking**. Blackfriday can find URLs that have not been
-  explicitly marked as links and turn them into links.
-
-* **Strikethrough**. Use two tildes (`~~`) to mark text that
-  should be crossed out.
-
-* **Hard line breaks**.
With this extension enabled (it is off by
-  default in the `MarkdownBasic` and `MarkdownCommon` convenience
-  functions), newlines in the input translate into line breaks in
-  the output.
-
-* **Smart quotes**. Smartypants-style punctuation substitution is
-  supported, turning normal double- and single-quote marks into
-  curly quotes, etc.
-
-* **LaTeX-style dash parsing** is an additional option, where `--`
-  is translated into `–`, and `---` is translated into
-  `—`. This differs from most smartypants processors, which
-  turn a single hyphen into an ndash and a double hyphen into an
-  mdash.
-
-* **Smart fractions**, where anything that looks like a fraction
-  is translated into suitable HTML (instead of just a few special
-  cases like most smartypants processors). For example, `4/5`
-  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
-  <sup>4</sup>&frasl;<sub>5</sub>.
-
-
-Other renderers
----------------
-
-Blackfriday is structured to allow alternative rendering engines. Here
-are a few of note:
-
-* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
-  provides a GitHub Flavored Markdown renderer with fenced code block
-  highlighting and clickable heading anchor links.
-
-  It's not customizable, and its goal is to produce HTML output
-  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
-  except the rendering is performed locally.
-
-* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
-  but for markdown.
-
-* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
-  renders output as LaTeX.
-
-* [bfchroma](https://github.com/Depado/bfchroma/): provides convenient
-  integration with the [Chroma](https://github.com/alecthomas/chroma) code
-  highlighting library. bfchroma is only compatible with v2 of Blackfriday and
-  provides a drop-in renderer ready to use with Blackfriday, as well as
-  options and means for further customization.
-
-* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
-
-* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to Slack message style.
-
-
-TODO
-----
-
-* More unit testing
-* Improve Unicode support. It does not understand all Unicode
-  rules (about what constitutes a letter, a punctuation symbol,
-  etc.), so it may fail to detect word boundaries correctly in
-  some instances. It is safe on all UTF-8 input.
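Tying the options machinery to the extension list above: in v2, individual extensions are bit flags combined through `blackfriday.WithExtensions`. The following is a minimal sketch, assuming the v2 flag names `Tables`, `FencedCode`, and `Strikethrough` (the input literal is purely illustrative):

```go
package main

import (
	"fmt"

	blackfriday "github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("Name | Age\n-----|----\nBob  | 27\n\n~~struck~~\n")

	// Combine only the extensions we want instead of the defaults.
	exts := blackfriday.Tables | blackfriday.FencedCode | blackfriday.Strikethrough
	output := blackfriday.Run(input, blackfriday.WithExtensions(exts))
	fmt.Println(string(output))
}
```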
-
-
-License
--------
-
-[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
-
-
-   [1]: https://daringfireball.net/projects/markdown/ "Markdown"
-   [2]: https://golang.org/ "Go Language"
-   [3]: https://github.com/vmg/sundown "Sundown"
-   [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
-   [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
-
-   [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
-   [BuildV2URL]: https://travis-ci.org/russross/blackfriday
-   [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
-   [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2
diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go
deleted file mode 100644
index 563cb290..00000000
--- a/vendor/github.com/russross/blackfriday/block.go
+++ /dev/null
@@ -1,1480 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-// Functions to parse block-level elements.
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"strings"
-	"unicode"
-)
-
-// Parse block-level data.
-// Note: this function and many that it calls assume that
-// the input buffer ends with a newline.
-func (p *parser) block(out *bytes.Buffer, data []byte) {
-	if len(data) == 0 || data[len(data)-1] != '\n' {
-		panic("block input is missing terminating newline")
-	}
-
-	// this is called recursively: enforce a maximum depth
-	if p.nesting >= p.maxNesting {
-		return
-	}
-	p.nesting++
-
-	// parse out one block-level construct at a time
-	for len(data) > 0 {
-		// prefixed header:
-		//
-		// # Header 1
-		// ## Header 2
-		// ...
-		// ###### Header 6
-		if p.isPrefixHeader(data) {
-			data = data[p.prefixHeader(out, data):]
-			continue
-		}
-
-		// block of preformatted HTML:
-		//
-		// <div>
-		//     ...
-		// </div>
- if data[0] == '<' { - if i := p.html(out, data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.flags&EXTENSION_TITLEBLOCK != 0 { - if data[0] == '%' { - if i := p.titleBlock(out, data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(out, data):] - continue - } - - // fenced code block: - // - // ``` go info string here - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.flags&EXTENSION_FENCED_CODE != 0 { - if i := p.fencedCodeBlock(out, data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.r.HRule(out) - var i int - for i = 0; data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(out, data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.flags&EXTENSION_TABLES != 0 { - if i := p.table(out, data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(out, data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. 
Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_ORDERED):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_DEFINITION):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headers, too - data = data[p.paragraph(out, data):] - } - - p.nesting-- -} - -func (p *parser) isPrefixHeader(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.flags&EXTENSION_SPACE_HEADERS != 0 { - level := 0 - for level < 6 && data[level] == '#' { - level++ - } - if data[level] != ' ' { - return false - } - } - return true -} - -func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { - level := 0 - for level < 6 && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.flags&EXTENSION_HEADER_IDS != 0 { - j, k := 0, 0 - // find start/end of header id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract header id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - end-- - } - if end > i { - if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { - id = SanitizedAnchorName(string(data[i:end])) - } - work := func() bool { - p.inline(out, data[i:end]) - return true - } - p.r.Header(out, work, level, id) - } - return skip -} - -func (p *parser) isUnderlinedHeader(data []byte) int { - // test of level 1 header - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if data[i] == '\n' { - return 1 - } else { - return 0 - } - } - - // test of level 2 header - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if data[i] == '\n' { - return 2 - } else { - return 0 - } - } - - return 0 -} - -func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - p.r.TitleBlock(out, data) - - return len(data) -} - -func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(out, data, doRender); size > 0 { - return size - } - - // check for an
<hr>
tag - if size := p.htmlHr(out, data, doRender); size > 0 { - return size - } - - // check for HTML CDATA - if size := p.htmlCDATA(out, data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - p.r.BlockHtml(out, data[:end]) - } - - return i -} - -func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { - // html block needs to end with a blank line - if i := p.isEmpty(data[start:]); i > 0 { - size := start + i - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - p.r.BlockHtml(out, data[:end]) - } - return size - } - return 0 -} - -// HTML comment, lax form -func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { - i := p.inlineHTMLComment(out, data) - return p.renderHTMLBlock(out, data, i, doRender) -} - -// HTML CDATA section -func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { - const cdataTag = "') { - i++ - } - i++ - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return p.renderHTMLBlock(out, data, i, doRender) -} - -// HR, which is the only self-closing block tag considered -func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an
<hr> tag after all; at least not a valid one
-		return 0
-	}
-
-	i := 3
-	for data[i] != '>' && data[i] != '\n' {
-		i++
-	}
-
-	if data[i] == '>' {
-		return p.renderHTMLBlock(out, data, i+1, doRender)
-	}
-
-	return 0
-}
-
-func (p *parser) htmlFindTag(data []byte) (string, bool) {
-	i := 0
-	for isalnum(data[i]) {
-		i++
-	}
-	key := string(data[:i])
-	if _, ok := blockTags[key]; ok {
-		return key, true
-	}
-	return "", false
-}
-
-func (p *parser) htmlFindEnd(tag string, data []byte) int {
-	// assume data[0] == '<' && data[1] == '/' already tested
-
-	// check if tag is a match
-	closetag := []byte("</" + tag + ">")
-	if !bytes.HasPrefix(data, closetag) {
-		return 0
-	}
-	i := len(closetag)
-
-	// check that the rest of the line is blank
-	skip := 0
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		return 0
-	}
-	i += skip
-	skip = 0
-
-	if i >= len(data) {
-		return i
-	}
-
-	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
-		return i
-	}
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		// following line must be blank
-		return 0
-	}
-
-	return i + skip
-}
-
-func (*parser) isEmpty(data []byte) int {
-	// it is okay to call isEmpty on an empty buffer
-	if len(data) == 0 {
-		return 0
-	}
-
-	var i int
-	for i = 0; i < len(data) && data[i] != '\n'; i++ {
-		if data[i] != ' ' && data[i] != '\t' {
-			return 0
-		}
-	}
-	return i + 1
-}
-
-func (*parser) isHRule(data []byte) bool {
-	i := 0
-
-	// skip up to three spaces
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// look at the hrule char
-	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
-		return false
-	}
-	c := data[i]
-
-	// the whole line must be the char or whitespace
-	n := 0
-	for data[i] != '\n' {
-		switch {
-		case data[i] == c:
-			n++
-		case data[i] != ' ':
-			return false
-		}
-		i++
-	}
-
-	return n >= 3
-}
-
-// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
-// and returns the end index if so, or 0 otherwise. It also returns the marker found.
-// If info is not nil, it gets set to the syntax specified in the fence line.
-// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
-func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) {
-	i, size := 0, 0
-
-	// skip up to three spaces
-	for i < len(data) && i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// check for the marker characters: ~ or `
-	if i >= len(data) {
-		return 0, ""
-	}
-	if data[i] != '~' && data[i] != '`' {
-		return 0, ""
-	}
-
-	c := data[i]
-
-	// the whole line must be the same char or whitespace
-	for i < len(data) && data[i] == c {
-		size++
-		i++
-	}
-
-	// the marker char must occur at least 3 times
-	if size < 3 {
-		return 0, ""
-	}
-	marker = string(data[i-size : i])
-
-	// if this is the end marker, it must match the beginning marker
-	if oldmarker != "" && marker != oldmarker {
-		return 0, ""
-	}
-
-	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
-	// into one, always get the info string, and discard it if the caller doesn't care.
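-	// The code below extracts the optional info string: either the contents
-	// of a {...} block placed right after the fence marker, or everything up
-	// to the first vertical-whitespace character.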
- if info != nil { - infoLength := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if newlineOptional && i == len(data) { - return i, marker - } - return 0, "" - } - - infoStart := i - - if data[i] == '{' { - i++ - infoStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - infoLength++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for infoLength > 0 && isspace(data[infoStart]) { - infoStart++ - infoLength-- - } - - for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { - infoLength-- - } - - i++ - } else { - for i < len(data) && !isverticalspace(data[i]) { - infoLength++ - i++ - } - } - - *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) - } - - i = skipChar(data, i, ' ') - if i >= len(data) { - if newlineOptional { - return i, marker - } - return 0, "" - } - if data[i] == '\n' { - i++ // Take newline into account - } - - return i, marker -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { - var infoString string - beg, marker := isFenceLine(data, &infoString, "", false) - if beg == 0 || beg >= len(data) { - return 0 - } - - var work bytes.Buffer - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - newlineOptional := !doRender - fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - p.r.BlockCode(out, work.Bytes(), infoString) - } - - return beg -} - -func (p *parser) table(out *bytes.Buffer, data []byte) int { - var header bytes.Buffer - i, columns := p.tableHeader(&header, data) - if i == 0 { - return 0 - } - - var body bytes.Buffer - - for i < len(data) { - pipes, rowStart := 0, i - for ; data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - i++ - p.tableRow(&body, data[rowStart:i], columns, false) - } - - p.r.Table(out, header.Bytes(), body.Bytes(), columns) - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { - i := 0 - colCount := 1 - for i = 0; data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - header := data[:i+1] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]int, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TABLE_ALIGNMENT_LEFT - dashes++ - } - for data[i] == '-' { - i++ - dashes++ - } - if data[i] == ':' { - i++ - columns[col] |= TABLE_ALIGNMENT_RIGHT - dashes++ - } - for data[i] == ' ' { - i++ - } - - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.tableRow(out, header, columns, true) - size = i + 1 - return -} - -func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { - i, col := 0, 0 - var rowWork bytes.Buffer - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for data[i] == ' ' { - i++ - } - - cellStart := i - - for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && data[cellEnd-1] == ' ' { - cellEnd-- - } - - var cellWork bytes.Buffer - p.inline(&cellWork, data[cellStart:cellEnd]) - - if header { - p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) - } else { - p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) - } - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - if header { - p.r.TableHeaderCell(&rowWork, nil, columns[col]) - } else { - p.r.TableCell(&rowWork, nil, columns[col]) - } - } - - // silently ignore rows with too many cells - - p.r.TableRow(out, rowWork.Bytes()) -} - -// returns blockquote prefix length -func (p *parser) quotePrefix(data []byte) int { - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - if data[i] == '>' { - if data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *parser) quote(out *bytes.Buffer, data []byte) int { - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for fenced code and, if one is found, incorporate it altogether, regardless of any contents inside it
-		for data[end] != '\n' {
-			if p.flags&EXTENSION_FENCED_CODE != 0 {
-				if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
-					// -1 to compensate for the extra end++ after the loop:
-					end += i - 1
-					break
-				}
-			}
-			end++
-		}
-		end++
-
-		if pre := p.quotePrefix(data[beg:]); pre > 0 {
-			// skip the prefix
-			beg += pre
-		} else if p.terminateBlockquote(data, beg, end) {
-			break
-		}
-
-		// this line is part of the blockquote
-		raw.Write(data[beg:end])
-		beg = end
-	}
-
-	var cooked bytes.Buffer
-	p.block(&cooked, raw.Bytes())
-	p.r.BlockQuote(out, cooked.Bytes())
-	return end
-}
-
-// returns prefix length for block code
-func (p *parser) codePrefix(data []byte) int {
-	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
-		return 4
-	}
-	return 0
-}
-
-func (p *parser) code(out *bytes.Buffer, data []byte) int {
-	var work bytes.Buffer
-
-	i := 0
-	for i < len(data) {
-		beg := i
-		for data[i] != '\n' {
-			i++
-		}
-		i++
-
-		blankline := p.isEmpty(data[beg:i]) > 0
-		if pre := p.codePrefix(data[beg:i]); pre > 0 {
-			beg += pre
-		} else if !blankline {
-			// non-empty, non-prefixed line breaks the pre
-			i = beg
-			break
-		}
-
-		// verbatim copy to the working buffer
-		if blankline {
-			work.WriteByte('\n')
-		} else {
-			work.Write(data[beg:i])
-		}
-	}
-
-	// trim all the \n off the end of work
-	workbytes := work.Bytes()
-	eol := len(workbytes)
-	for eol > 0 && workbytes[eol-1] == '\n' {
-		eol--
-	}
-	if eol != len(workbytes) {
-		work.Truncate(eol)
-	}
-
-	work.WriteByte('\n')
-
-	p.r.BlockCode(out, work.Bytes(), "")
-
-	return i
-}
-
-// returns unordered list item prefix
-func (p *parser) uliPrefix(data []byte) int {
-	i := 0
-
-	// start with up to 3 spaces
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// need a *, +, or - followed by a space
-	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
-		data[i+1] != ' ' {
-		return 0
-	}
-	return i + 2
-}
-
-// returns ordered list item prefix
-func (p *parser) oliPrefix(data []byte) int {
-	i := 0
-
-	// start with up to 3 spaces
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// count the digits
-	start := i
-	for data[i] >= '0' && data[i] <= '9' {
-		i++
-	}
-
-	// we need >= 1 digits followed by a dot and a space
-	if start == i || data[i] != '.' || data[i+1] != ' ' {
-		return 0
-	}
-	return i + 2
-}
-
-// returns definition list item prefix
-func (p *parser) dliPrefix(data []byte) int {
-	i := 0
-
-	// need a ':' followed by a space
-	if data[i] != ':' || data[i+1] != ' ' {
-		return 0
-	}
-	for data[i] == ' ' {
-		i++
-	}
-	return i + 2
-}
-
-// parse ordered or unordered list block
-func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
-	i := 0
-	flags |= LIST_ITEM_BEGINNING_OF_LIST
-	work := func() bool {
-		for i < len(data) {
-			skip := p.listItem(out, data[i:], &flags)
-			i += skip
-
-			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
-				break
-			}
-			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
-		}
-		return true
-	}
-
-	p.r.List(out, work, flags)
-	return i
-}
-
-// Parse a single list item.
-// Assumes initial prefix is already removed if this is a sublist.
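-// It returns the number of bytes of data consumed by the item, or 0 when
-// data does not begin with a recognizable list item.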
-func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { - // keep track of the indentation of the first line - itemIndent := 0 - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^LIST_TYPE_TERM - } - } - if i == 0 { - // if in defnition list, set term flag and continue - if *flags&LIST_TYPE_DEFINITION != 0 { - *flags |= LIST_TYPE_TERM - } else { - return 0 - } - } - - // skip leading whitespace on first line - for data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && data[i-1] != '\n' { - i++ - } - - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - if p.flags&EXTENSION_FENCED_CODE != 0 && i > line { - // determine if codeblock starts on the first line - _, codeBlockMarker = isFenceLine(data[line:i], nil, "", false) - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for data[i-1] != '\n' { - i++ - } - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - raw.Write(data[line:i]) - line = i - continue - } - - // calculate the indentation - indent := 0 - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - } - - chunk := data[line+indent : i] - - if p.flags&EXTENSION_FENCED_CODE != 0 { - // determine if in or out of codeblock - // if in codeblock, ignore normal list processing - _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) - if marker != "" { - if codeBlockMarker == "" { - // start of codeblock - codeBlockMarker = marker - } else { - // end of codeblock. - *flags |= LIST_ITEM_CONTAINS_BLOCK - codeBlockMarker = "" - } - } - // we are in a codeblock, write line, and continue - if codeBlockMarker != "" || marker != "" { - raw.Write(data[line+indent : i]) - line = i - continue gatherlines - } - } - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - if containsBlankLine { - // end the list if the type changed after a blank line - if indent <= itemIndent && - ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || - (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { - - *flags |= LIST_ITEM_END_OF_LIST - break gatherlines - } - *flags |= LIST_ITEM_CONTAINS_BLOCK - } - - // to be a nested list, it must be indented more - // if not, it is the next item in the same list - if indent <= itemIndent { - break gatherlines - } - - // is this the first item in the nested list? - if sublist == 0 { - sublist = raw.Len() - } - - // is this a nested prefix header? 
- case p.isPrefixHeader(chunk): - // if the header is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= LIST_ITEM_END_OF_LIST - break gatherlines - } - *flags |= LIST_ITEM_CONTAINS_BLOCK - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { - // is the next item still a part of this list? - next := i - for data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= LIST_ITEM_END_OF_LIST - } - } else { - *flags |= LIST_ITEM_END_OF_LIST - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - *flags |= LIST_ITEM_CONTAINS_BLOCK - } - - containsBlankLine = false - - // add the line into the working buffer without prefix - raw.Write(data[line+indent : i]) - - line = i - } - - // If reached end of data, the Renderer.ListItem call we're going to make below - // is definitely the last in the list. - if line >= len(data) { - *flags |= LIST_ITEM_END_OF_LIST - } - - rawBytes := raw.Bytes() - - // render the contents of the list item - var cooked bytes.Buffer - if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(&cooked, rawBytes[:sublist]) - p.block(&cooked, rawBytes[sublist:]) - } else { - p.block(&cooked, rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - p.inline(&cooked, rawBytes[:sublist]) - p.block(&cooked, rawBytes[sublist:]) - } else { - p.inline(&cooked, rawBytes) - } - } - - // render the actual list item - cookedBytes := cooked.Bytes() - parsedEnd := len(cookedBytes) - - // strip trailing newlines - for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { - parsedEnd-- - } - p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) - - return line -} - -// render a single paragraph that has already been parsed out -func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - // trim trailing newline - end := len(data) - 1 - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - work := func() bool { - p.inline(out, data[beg:end]) - return true - } - p.r.Paragraph(out, work) -} - -func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(out, data[prev:], LIST_TYPE_DEFINITION) - } - } - - p.renderParagraph(out, data[:i]) - return i + n - } - - // an underline under some text marks a header, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeader(current); level > 0 { - // render the paragraph - p.renderParagraph(out, data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - // render the header - // this ugly double closure avoids forcing variables onto the heap - work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { - return func() bool { - pp.inline(o, d) - return true - } - }(out, p, data[prev:eol]) - - id := "" - if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { - id = SanitizedAnchorName(string(data[prev:eol])) - } - - p.r.Header(out, work, level, id) - - // find the end of the underline - for data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { - if data[i] == '<' && p.html(out, current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(out, data[:i]) - return i - } - } - - // if there's a prefixed header or a horizontal rule after this, paragraph is over - if p.isPrefixHeader(current) || p.isHRule(current) { - p.renderParagraph(out, data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.flags&EXTENSION_FENCED_CODE != 0 { - if p.fencedCodeBlock(out, current, false) > 0 { - p.renderParagraph(out, data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if p.dliPrefix(current) != 0 { - return p.list(out, data[prev:], LIST_TYPE_DEFINITION) - } - } - - // if there's a list after this, paragraph is over - if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(out, data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - for data[i] != '\n' { - i++ - } - i++ - } - - p.renderParagraph(out, data[:i]) - return i -} - -// SanitizedAnchorName returns a sanitized anchor name for the given text. -// -// It implements the algorithm specified in the package comment. -func SanitizedAnchorName(text string) string { - var anchorName []rune - futureDash := false - for _, r := range text { - switch { - case unicode.IsLetter(r) || unicode.IsNumber(r): - if futureDash && len(anchorName) > 0 { - anchorName = append(anchorName, '-') - } - futureDash = false - anchorName = append(anchorName, unicode.ToLower(r)) - default: - futureDash = true - } - } - return string(anchorName) -} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go deleted file mode 100644 index 9656c42a..00000000 --- a/vendor/github.com/russross/blackfriday/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package blackfriday is a Markdown processor. -// -// It translates plain text with simple formatting rules into HTML or LaTeX. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. 
This algorithm is used to create
-// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
-// algorithm is specified below, so that other packages can create
-// compatible anchor names and links to those anchors.
-//
-// The algorithm iterates over the input text, interpreted as UTF-8,
-// one Unicode code point (rune) at a time. All runes that are letters (category L)
-// or numbers (category N) are considered valid characters. They are mapped to
-// lower case, and included in the output. All other runes are considered
-// invalid characters. Invalid characters that precede the first valid character,
-// as well as invalid characters that follow the last valid character,
-// are dropped completely. All other sequences of invalid characters
-// between two valid characters are replaced with a single dash character '-'.
-//
-// SanitizedAnchorName exposes this functionality, and can be used to
-// create compatible links to the anchor names generated by blackfriday.
-// This algorithm is also implemented in a small standalone package at
-// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
-// that want a small package and don't need full functionality of blackfriday.
-package blackfriday
-
-// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
-// github.com/shurcooL/sanitized_anchor_name.
-// Otherwise, users of sanitized_anchor_name will get anchor names
-// that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go
deleted file mode 100644
index fa044ca2..00000000
--- a/vendor/github.com/russross/blackfriday/html.go
+++ /dev/null
@@ -1,945 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// HTML rendering backend
-//
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// Html renderer configuration options.
-const (
-	HTML_SKIP_HTML  = 1 << iota // skip preformatted HTML blocks
-	HTML_SKIP_STYLE             // skip embedded